Dataset schema:

- code: string (length 87 to 55.2k characters)
- code_codestyle: int64 (0 to 349)
- style_context: string (length 135 to 49.1k characters)
- style_context_codestyle: int64 (0 to 349)
- label: int64 (0 or 1)
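Below is a minimal sketch of loading and inspecting a dataset with this schema via the `datasets` library. The repository id "user/python-codestyle-pairs" is a placeholder assumption, not the dataset's actual name, and reading `label` as "1 when the two snippets carry the same style id" is inferred from the sample rows that follow (both style ids are 345 and the label is 1 in every complete row shown). The sample rows below are presented with each field labeled.

# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# "user/python-codestyle-pairs" is a placeholder repository id.
from datasets import load_dataset

ds = load_dataset("user/python-codestyle-pairs", split="train")

row = ds[0]
print(row["code"][:120])               # a Python snippet (87 to 55.2k chars)
print(row["code_codestyle"])           # style id in [0, 349]
print(row["style_context"][:120])      # a second snippet serving as style reference
print(row["style_context_codestyle"])  # style id of the reference snippet
print(row["label"])                    # assumed: 1 if the two style ids match, else 0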
code:

import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(
    lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str
) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # A new power of two is reached: widen every existing code by one bit.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given string of bits using the Lempel-Ziv algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the original file length, in a self-delimiting header, to the result."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given string of bits to the file as bytes, padding the last byte."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
code_codestyle: 345
style_context:

import pickle

import numpy as np
from matplotlib import pyplot as plt


class _snake_case:
    '''simple docstring'''

    def __init__(self: Any, lowerCamelCase_: Dict, lowerCamelCase_: Tuple, lowerCamelCase_: Dict, lowerCamelCase_: Tuple, lowerCamelCase_: Any, lowerCamelCase_: Tuple = 0.2, lowerCamelCase_: Union[str, Any] = 0.2) -> List[str]:
        UpperCAmelCase_ = bp_numa
        UpperCAmelCase_ = bp_numa
        UpperCAmelCase_ = bp_numa
        UpperCAmelCase_ = conva_get[:2]
        UpperCAmelCase_ = conva_get[2]
        UpperCAmelCase_ = size_pa
        UpperCAmelCase_ = rate_w
        UpperCAmelCase_ = rate_t
        UpperCAmelCase_ = [
            np.mat(-1 * np.random.rand(self.conva[0], self.conva[0]) + 0.5)
            for i in range(self.conva[1])
        ]
        UpperCAmelCase_ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa) + 0.5)
        UpperCAmelCase_ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa) + 0.5)
        UpperCAmelCase_ = -2 * np.random.rand(self.conva[1]) + 1
        UpperCAmelCase_ = -2 * np.random.rand(self.num_bpa) + 1
        UpperCAmelCase_ = -2 * np.random.rand(self.num_bpa) + 1

    def A__(self: str, lowerCamelCase_: Optional[Any]) -> Tuple:
        # save model dict with pickle
        UpperCAmelCase_ = {
            "num_bp1": self.num_bpa,
            "num_bp2": self.num_bpa,
            "num_bp3": self.num_bpa,
            "conv1": self.conva,
            "step_conv1": self.step_conva,
            "size_pooling1": self.size_poolinga,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conva,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conva,
            "thre_bp2": self.thre_bpa,
            "thre_bp3": self.thre_bpa,
        }
        with open(lowerCamelCase_, "wb") as f:
            pickle.dump(lowerCamelCase_, lowerCamelCase_)
        print(f"Model saved: {save_path}")

    @classmethod
    def A__(cls: List[str], lowerCamelCase_: str) -> List[str]:
        # read saved model
        with open(lowerCamelCase_, "rb") as f:
            UpperCAmelCase_ = pickle.load(lowerCamelCase_)  # noqa: S301
        UpperCAmelCase_ = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        UpperCAmelCase_ = model_dic.get("size_pooling1")
        UpperCAmelCase_ = model_dic.get("num_bp1")
        UpperCAmelCase_ = model_dic.get("num_bp2")
        UpperCAmelCase_ = model_dic.get("num_bp3")
        UpperCAmelCase_ = model_dic.get("rate_weight")
        UpperCAmelCase_ = model_dic.get("rate_thre")
        # create model instance
        UpperCAmelCase_ = CNN(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_)
        # modify model parameter
        UpperCAmelCase_ = model_dic.get("w_conv1")
        UpperCAmelCase_ = model_dic.get("wkj")
        UpperCAmelCase_ = model_dic.get("vji")
        UpperCAmelCase_ = model_dic.get("thre_conv1")
        UpperCAmelCase_ = model_dic.get("thre_bp2")
        UpperCAmelCase_ = model_dic.get("thre_bp3")
        return conv_ins

    def A__(self: List[Any], lowerCamelCase_: Union[str, Any]) -> Tuple:
        return 1 / (1 + np.exp(-1 * x))

    def A__(self: Union[str, Any], lowerCamelCase_: Union[str, Any]) -> Optional[Any]:
        return round(lowerCamelCase_, 3)

    def A__(self: Tuple, lowerCamelCase_: Any, lowerCamelCase_: List[str], lowerCamelCase_: str, lowerCamelCase_: Any, lowerCamelCase_: Union[str, Any]) -> Any:
        # convolution process
        UpperCAmelCase_ = convs[0]
        UpperCAmelCase_ = convs[1]
        UpperCAmelCase_ = np.shape(lowerCamelCase_)[0]
        # get the data slice of original image data, data_focus
        UpperCAmelCase_ = []
        for i_focus in range(0, size_data - size_conv + 1, lowerCamelCase_):
            for j_focus in range(0, size_data - size_conv + 1, lowerCamelCase_):
                UpperCAmelCase_ = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(lowerCamelCase_)
        # calculate the feature map of every single kernel, and saved as list of matrix
        UpperCAmelCase_ = []
        UpperCAmelCase_ = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(lowerCamelCase_):
            UpperCAmelCase_ = []
            for i_focus in range(len(lowerCamelCase_)):
                UpperCAmelCase_ = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(lowerCamelCase_))
            UpperCAmelCase_ = np.asmatrix(lowerCamelCase_).reshape(
                lowerCamelCase_, lowerCamelCase_
            )
            data_featuremap.append(lowerCamelCase_)
        # expanding the data slice to One dimenssion
        UpperCAmelCase_ = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(lowerCamelCase_))
        UpperCAmelCase_ = np.asarray(lowerCamelCase_)
        return focus_list, data_featuremap

    def A__(self: Tuple, lowerCamelCase_: Optional[int], lowerCamelCase_: Tuple, lowerCamelCase_: Optional[Any] = "average_pool") -> List[Any]:
        # pooling process
        UpperCAmelCase_ = len(featuremaps[0])
        UpperCAmelCase_ = int(size_map / size_pooling)
        UpperCAmelCase_ = []
        for i_map in range(len(lowerCamelCase_)):
            UpperCAmelCase_ = featuremaps[i_map]
            UpperCAmelCase_ = []
            for i_focus in range(0, lowerCamelCase_, lowerCamelCase_):
                for j_focus in range(0, lowerCamelCase_, lowerCamelCase_):
                    UpperCAmelCase_ = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(lowerCamelCase_))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(lowerCamelCase_))
            UpperCAmelCase_ = np.asmatrix(lowerCamelCase_).reshape(lowerCamelCase_, lowerCamelCase_)
            featuremap_pooled.append(lowerCamelCase_)
        return featuremap_pooled

    def A__(self: Union[str, Any], lowerCamelCase_: Tuple) -> Optional[int]:
        # expanding three dimension data to one dimension list
        UpperCAmelCase_ = []
        for i in range(len(lowerCamelCase_)):
            UpperCAmelCase_ = np.shape(data[i])
            UpperCAmelCase_ = data[i].reshape(1, shapes[0] * shapes[1])
            UpperCAmelCase_ = data_listed.getA().tolist()[0]
            data_expanded.extend(lowerCamelCase_)
        UpperCAmelCase_ = np.asarray(lowerCamelCase_)
        return data_expanded

    def A__(self: Optional[Any], lowerCamelCase_: Optional[int]) -> Union[str, Any]:
        # expanding matrix to one dimension list
        UpperCAmelCase_ = np.asarray(lowerCamelCase_)
        UpperCAmelCase_ = np.shape(lowerCamelCase_)
        UpperCAmelCase_ = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def A__(self: str, lowerCamelCase_: Dict, lowerCamelCase_: int, lowerCamelCase_: Optional[Any], lowerCamelCase_: Union[str, Any], lowerCamelCase_: Any) -> Union[str, Any]:
        UpperCAmelCase_ = []
        UpperCAmelCase_ = 0
        for i_map in range(lowerCamelCase_):
            UpperCAmelCase_ = np.ones((size_map, size_map))
            for i in range(0, lowerCamelCase_, lowerCamelCase_):
                for j in range(0, lowerCamelCase_, lowerCamelCase_):
                    UpperCAmelCase_ = pd_pool[i_pool]
                    UpperCAmelCase_ = i_pool + 1
            UpperCAmelCase_ = np.multiply(
                lowerCamelCase_, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(lowerCamelCase_)
        return pd_all

    def A__(self: str, lowerCamelCase_: int, lowerCamelCase_: int, lowerCamelCase_: List[Any], lowerCamelCase_: Any, lowerCamelCase_: List[str], lowerCamelCase_: Any = bool) -> Optional[int]:
        # model traning
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(lowerCamelCase_)))
        print((" - - Shape: Teach_Data ", np.shape(lowerCamelCase_)))
        UpperCAmelCase_ = 0
        UpperCAmelCase_ = []
        UpperCAmelCase_ = 10000
        while rp < n_repeat and mse >= error_accuracy:
            UpperCAmelCase_ = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(lowerCamelCase_)):
                # print('------------Learning Image: %d--------------'%p)
                UpperCAmelCase_ = np.asmatrix(datas_train[p])
                UpperCAmelCase_ = np.asarray(datas_teach[p])
                UpperCAmelCase_, UpperCAmelCase_ = self.convolute(
                    lowerCamelCase_,
                    self.conva,
                    self.w_conva,
                    self.thre_conva,
                    conv_step=self.step_conva,
                )
                UpperCAmelCase_ = self.pooling(lowerCamelCase_, self.size_poolinga)
                UpperCAmelCase_ = np.shape(lowerCamelCase_)
                UpperCAmelCase_ = self._expand(lowerCamelCase_)
                UpperCAmelCase_ = data_bp_input
                UpperCAmelCase_ = np.dot(lowerCamelCase_, self.vji.T) - self.thre_bpa
                UpperCAmelCase_ = self.sig(lowerCamelCase_)
                UpperCAmelCase_ = np.dot(lowerCamelCase_, self.wkj.T) - self.thre_bpa
                UpperCAmelCase_ = self.sig(lowerCamelCase_)
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                UpperCAmelCase_ = np.multiply(
                    (data_teach - bp_outa), np.multiply(lowerCamelCase_, (1 - bp_outa))
                )
                UpperCAmelCase_ = np.multiply(
                    np.dot(lowerCamelCase_, self.wkj), np.multiply(lowerCamelCase_, (1 - bp_outa))
                )
                UpperCAmelCase_ = np.dot(lowerCamelCase_, self.vji)
                UpperCAmelCase_ = pd_i_all / (self.size_poolinga * self.size_poolinga)
                UpperCAmelCase_ = pd_conva_pooled.T.getA().tolist()
                UpperCAmelCase_ = self._calculate_gradient_from_pool(
                    lowerCamelCase_,
                    lowerCamelCase_,
                    shape_featuremapa[0],
                    shape_featuremapa[1],
                    self.size_poolinga,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1]):
                    UpperCAmelCase_ = self._expand_mat(pd_conva_all[k_conv])
                    UpperCAmelCase_ = self.rate_weight * np.dot(lowerCamelCase_, lowerCamelCase_)
                    UpperCAmelCase_ = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0])
                    )
                    UpperCAmelCase_ = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                UpperCAmelCase_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                UpperCAmelCase_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                UpperCAmelCase_ = self.thre_bpa - pd_k_all * self.rate_thre
                UpperCAmelCase_ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                UpperCAmelCase_ = np.sum(abs(data_teach - bp_outa))
                error_count += errors
                # print('  ----Teach      ', data_teach)
                # print('  ----BP_output  ', bp_out3)
            UpperCAmelCase_ = rp + 1
            UpperCAmelCase_ = error_count / patterns
            all_mse.append(lowerCamelCase_)

        def draw_error():
            UpperCAmelCase_ = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(lowerCamelCase_, "+-")
            plt.plot(lowerCamelCase_, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(lowerCamelCase_, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def A__(self: Optional[int], lowerCamelCase_: Any) -> Tuple:
        # model predict
        UpperCAmelCase_ = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(lowerCamelCase_)))
        for p in range(len(lowerCamelCase_)):
            UpperCAmelCase_ = np.asmatrix(datas_test[p])
            UpperCAmelCase_, UpperCAmelCase_ = self.convolute(
                lowerCamelCase_,
                self.conva,
                self.w_conva,
                self.thre_conva,
                conv_step=self.step_conva,
            )
            UpperCAmelCase_ = self.pooling(lowerCamelCase_, self.size_poolinga)
            UpperCAmelCase_ = self._expand(lowerCamelCase_)
            UpperCAmelCase_ = data_bp_input
            UpperCAmelCase_ = bp_outa * self.vji.T - self.thre_bpa
            UpperCAmelCase_ = self.sig(lowerCamelCase_)
            UpperCAmelCase_ = bp_outa * self.wkj.T - self.thre_bpa
            UpperCAmelCase_ = self.sig(lowerCamelCase_)
            produce_out.extend(bp_outa.getA().tolist())
        UpperCAmelCase_ = [list(map(self.do_round, lowerCamelCase_)) for each in produce_out]
        return np.asarray(lowerCamelCase_)

    def A__(self: Optional[Any], lowerCamelCase_: Dict) -> Tuple:
        # return the data of image after convoluting process so we can check it out
        UpperCAmelCase_ = np.asmatrix(lowerCamelCase_)
        UpperCAmelCase_, UpperCAmelCase_ = self.convolute(
            lowerCamelCase_,
            self.conva,
            self.w_conva,
            self.thre_conva,
            conv_step=self.step_conva,
        )
        UpperCAmelCase_ = self.pooling(lowerCamelCase_, self.size_poolinga)
        return data_conveda, data_pooleda


if __name__ == "__main__":
    pass
style_context_codestyle: 345

label: 1
code:

import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel


UpperCamelCase_ = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def lowerCamelCase_(_a: Optional[Any], _a: Optional[Any]):
    '''simple docstring'''
    return torch.atana(_a, _a) / math.pi * 2


def lowerCamelCase_(_a: List[Any]):
    '''simple docstring'''
    UpperCAmelCase_ = torch.sin(t * math.pi / 2) ** 2
    UpperCAmelCase_ = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(_a, _a)


class _snake_case(__snake_case):
    '''simple docstring'''

    pass


class _snake_case(nn.Module):
    '''simple docstring'''

    def __init__(self: List[Any], lowerCamelCase_: Union[str, Any]) -> int:
        super().__init__()
        UpperCAmelCase_ = DiffusionAttnUnetaD(lowerCamelCase_, n_attn_layers=4)
        UpperCAmelCase_ = deepcopy(self.diffusion)
        UpperCAmelCase_ = torch.quasirandom.SobolEngine(1, scramble=lowerCamelCase_)


def lowerCamelCase_(_a: str):
    '''simple docstring'''
    UpperCAmelCase_ = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"


UpperCamelCase_ = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UpperCamelCase_ = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
UpperCamelCase_ = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
UpperCamelCase_ = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
UpperCamelCase_ = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
UpperCamelCase_ = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def lowerCamelCase_(_a: List[str]):
    '''simple docstring'''
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def lowerCamelCase_(_a: Optional[int]):
    '''simple docstring'''
    for key, value in ATTN_MAP.items():
        if name.startswith(_a) and not isinstance(_a, _a):
            return name.replace(_a, _a)
        elif name.startswith(_a):
            return [name.replace(_a, _a) for v in value]
    raise ValueError(f"Attn error with {name}")


def lowerCamelCase_(_a: Dict, _a: Any = 13):
    '''simple docstring'''
    UpperCAmelCase_ = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    UpperCAmelCase_ = 0
    if string.startswith("net.3."):
        depth += 1
        UpperCAmelCase_ = string[6:]
    elif string.startswith("net."):
        UpperCAmelCase_ = string[4:]
    while string.startswith("main.7."):
        depth += 1
        UpperCAmelCase_ = string[7:]
    if string.startswith("main."):
        UpperCAmelCase_ = string[5:]
    # mid block
    if string[:2].isdigit():
        UpperCAmelCase_ = string[:2]
        UpperCAmelCase_ = string[2:]
    else:
        UpperCAmelCase_ = string[0]
        UpperCAmelCase_ = string[1:]
    if depth == max_depth:
        UpperCAmelCase_ = MID_NUM_TO_LAYER[layer_num]
        UpperCAmelCase_ = "mid_block"
    elif depth > 0 and int(_a) < 7:
        UpperCAmelCase_ = DOWN_NUM_TO_LAYER[layer_num]
        UpperCAmelCase_ = f"down_blocks.{depth}"
    elif depth > 0 and int(_a) > 7:
        UpperCAmelCase_ = UP_NUM_TO_LAYER[layer_num]
        UpperCAmelCase_ = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        UpperCAmelCase_ = DEPTH_0_TO_LAYER[layer_num]
        UpperCAmelCase_ = f"up_blocks.{max_depth - 1}" if int(_a) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    UpperCAmelCase_ = string_left[1:]
    if "resnets" in new_layer:
        UpperCAmelCase_ = convert_resconv_naming(_a)
    elif "attentions" in new_layer:
        UpperCAmelCase_ = convert_attn_naming(_a)
        UpperCAmelCase_ = new_string_left
    if not isinstance(_a, _a):
        UpperCAmelCase_ = prefix + "." + new_layer + "." + string_left
    else:
        UpperCAmelCase_ = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


def lowerCamelCase_(_a: Dict):
    '''simple docstring'''
    UpperCAmelCase_ = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        UpperCAmelCase_ = rename(_a)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(_a, _a):
            UpperCAmelCase_ = transform_conv_attns(_a, _a, _a)
        else:
            UpperCAmelCase_ = v
    return new_state_dict


def lowerCamelCase_(_a: int, _a: Dict, _a: Optional[int]):
    '''simple docstring'''
    if len(_a) == 1:
        if len(v.shape) == 3:
            # weight
            UpperCAmelCase_ = v[:, :, 0]
        else:
            # bias
            UpperCAmelCase_ = v
    else:
        # qkv matrices
        UpperCAmelCase_ = v.shape[0]
        UpperCAmelCase_ = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                UpperCAmelCase_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                UpperCAmelCase_ = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def lowerCamelCase_(_a: List[str]):
    '''simple docstring'''
    UpperCAmelCase_ = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    UpperCAmelCase_ = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        UpperCAmelCase_ = download(_a)
    UpperCAmelCase_ = MODELS_MAP[model_name]["sample_rate"]
    UpperCAmelCase_ = MODELS_MAP[model_name]["sample_size"]
    UpperCAmelCase_ = Object()
    UpperCAmelCase_ = sample_size
    UpperCAmelCase_ = sample_rate
    UpperCAmelCase_ = 0
    UpperCAmelCase_ = UNetaDModel(sample_size=_a, sample_rate=_a)
    UpperCAmelCase_ = diffusers_model.state_dict()
    UpperCAmelCase_ = DiffusionUncond(_a)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=_a)["state_dict"])
    UpperCAmelCase_ = orig_model.diffusion_ema.eval()
    UpperCAmelCase_ = orig_model.state_dict()
    UpperCAmelCase_ = rename_orig_weights(_a)
    UpperCAmelCase_ = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    UpperCAmelCase_ = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(_a) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(_a)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            UpperCAmelCase_ = value.squeeze()
        UpperCAmelCase_ = value
    diffusers_model.load_state_dict(_a)
    UpperCAmelCase_ = 100
    UpperCAmelCase_ = 33
    UpperCAmelCase_ = IPNDMScheduler(num_train_timesteps=_a)
    UpperCAmelCase_ = torch.manual_seed(_a)
    UpperCAmelCase_ = torch.randn([1, 2, config.sample_size], generator=_a).to(_a)
    UpperCAmelCase_ = torch.linspace(1, 0, steps + 1, device=_a)[:-1]
    UpperCAmelCase_ = get_crash_schedule(_a)
    UpperCAmelCase_ = DanceDiffusionPipeline(unet=_a, scheduler=_a)
    UpperCAmelCase_ = torch.manual_seed(33)
    UpperCAmelCase_ = pipe(num_inference_steps=_a, generator=_a).audios
    UpperCAmelCase_ = sampling.iplms_sample(_a, _a, _a, {})
    UpperCAmelCase_ = generated.clamp(-1, 1)
    UpperCAmelCase_ = (generated - audio).abs().sum()
    UpperCAmelCase_ = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", _a)
    print("Diff max", _a)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    UpperCamelCase_ = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    UpperCamelCase_ = parser.parse_args()
    main(args)
code_codestyle: 345
style_context:

import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
style_context_codestyle: 345

label: 1
code:

def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively search for key by checking both ends and narrowing inward.

    Returns the index of key in list_data, or -1 if it is absent.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
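The module calls `doctest.testmod()` but defines no doctests; here is a short usage sketch of the function as restored above, with arbitrarily chosen list values:

# Two-ended recursive linear search: checks both ends, then narrows inward.
print(search([1, 5, 7, 20, 30], 30))  # 4 (found at the right end)
print(search([1, 5, 7, 20, 30], 6))   # -1 (absent)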
code_codestyle: 345
style_context:

from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
style_context_codestyle: 345

label: 1
code:

def different_signs(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have opposite signs, using a bitwise XOR."""
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
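The XOR trick works because, in two's-complement representation, the sign bit is the most significant bit: `num1 ^ num2` has its sign bit set exactly when the two sign bits differ, so the result is negative precisely when the operands have opposite signs (Python's arbitrary-precision integers preserve these semantics). A quick check of the function as restored above:

# Quick sanity checks for the XOR sign-comparison trick.
assert different_signs(1, -1) is True      # opposite signs
assert different_signs(1, 1) is False      # both positive
assert different_signs(-65, -33) is False  # both negative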
code_codestyle: 345
style_context:

import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


UpperCamelCase_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"


def lowerCamelCase_(_a: str, _a: Any = 100, _a: int = " "):
    '''simple docstring'''
    UpperCAmelCase_ = text.split(_a)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(_a), _a)]


def lowerCamelCase_(_a: dict):
    '''simple docstring'''
    UpperCAmelCase_, UpperCAmelCase_ = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(_a):
                titles.append(title if title is not None else "")
                texts.append(_a)
    return {"title": titles, "text": texts}


def lowerCamelCase_(_a: dict, _a: DPRContextEncoder, _a: DPRContextEncoderTokenizerFast):
    '''simple docstring'''
    UpperCAmelCase_ = ctx_tokenizer(
        documents["title"], documents["text"], truncation=_a, padding="longest", return_tensors="pt"
    )["input_ids"]
    UpperCAmelCase_ = ctx_encoder(input_ids.to(device=_a), return_dict=_a).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def lowerCamelCase_(
    _a: "RagExampleArguments",
    _a: "ProcessingArguments",
    _a: "IndexHnswArguments",
):
    '''simple docstring'''
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    UpperCAmelCase_ = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    UpperCAmelCase_ = dataset.map(_a, batched=_a, num_proc=processing_args.num_proc)
    # And compute the embeddings
    UpperCAmelCase_ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_a)
    UpperCAmelCase_ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    UpperCAmelCase_ = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    UpperCAmelCase_ = dataset.map(
        partial(_a, ctx_encoder=_a, ctx_tokenizer=_a),
        batched=_a,
        batch_size=processing_args.batch_size,
        features=_a,
    )
    # And finally save your dataset
    UpperCAmelCase_ = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(_a)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    UpperCAmelCase_ = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=_a)
    # And save the index
    UpperCAmelCase_ = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(_a)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class _snake_case:
    '''simple docstring'''

    A__: str = field(
        default=str(Path(__snake_case).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    A__: Optional[str] = field(
        default=__snake_case,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    A__: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    A__: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    A__: Optional[str] = field(
        default=str(Path(__snake_case).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class _snake_case:
    '''simple docstring'''

    A__: Optional[int] = field(
        default=__snake_case,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    A__: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class _snake_case:
    '''simple docstring'''

    A__: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    A__: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
style_context_codestyle: 345

label: 1
code:

import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


UpperCamelCase_ = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

UpperCamelCase_ = logging.get_logger(__name__)


class _snake_case(__snake_case):
    '''simple docstring'''

    A__ = "mask2former"
    A__ = ["swin"]
    A__ = {"hidden_size": "hidden_dim"}

    def __init__(
        self: Any,
        lowerCamelCase_: Optional[Dict] = None,
        lowerCamelCase_: int = 256,
        lowerCamelCase_: int = 256,
        lowerCamelCase_: int = 256,
        lowerCamelCase_: int = 1024,
        lowerCamelCase_: str = "relu",
        lowerCamelCase_: int = 6,
        lowerCamelCase_: int = 10,
        lowerCamelCase_: int = 8,
        lowerCamelCase_: float = 0.0,
        lowerCamelCase_: int = 2048,
        lowerCamelCase_: bool = False,
        lowerCamelCase_: bool = False,
        lowerCamelCase_: int = 4,
        lowerCamelCase_: int = 255,
        lowerCamelCase_: int = 100,
        lowerCamelCase_: float = 0.1,
        lowerCamelCase_: float = 2.0,
        lowerCamelCase_: float = 5.0,
        lowerCamelCase_: float = 5.0,
        lowerCamelCase_: int = 12544,
        lowerCamelCase_: float = 3.0,
        lowerCamelCase_: float = 0.75,
        lowerCamelCase_: float = 0.02,
        lowerCamelCase_: float = 1.0,
        lowerCamelCase_: bool = True,
        lowerCamelCase_: List[int] = [4, 8, 16, 32],
        lowerCamelCase_: bool = None,
        **lowerCamelCase_: str,
    ) -> List[Any]:
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            UpperCAmelCase_ = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=lowerCamelCase_,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(lowerCamelCase_, lowerCamelCase_):
            UpperCAmelCase_ = backbone_config.pop("model_type")
            UpperCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
            UpperCAmelCase_ = config_class.from_dict(lowerCamelCase_)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        UpperCAmelCase_ = backbone_config
        UpperCAmelCase_ = feature_size
        UpperCAmelCase_ = mask_feature_size
        UpperCAmelCase_ = hidden_dim
        UpperCAmelCase_ = encoder_feedforward_dim
        UpperCAmelCase_ = activation_function
        UpperCAmelCase_ = encoder_layers
        UpperCAmelCase_ = decoder_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = dropout
        UpperCAmelCase_ = dim_feedforward
        UpperCAmelCase_ = pre_norm
        UpperCAmelCase_ = enforce_input_projection
        UpperCAmelCase_ = common_stride
        UpperCAmelCase_ = ignore_value
        UpperCAmelCase_ = num_queries
        UpperCAmelCase_ = no_object_weight
        UpperCAmelCase_ = class_weight
        UpperCAmelCase_ = mask_weight
        UpperCAmelCase_ = dice_weight
        UpperCAmelCase_ = train_num_points
        UpperCAmelCase_ = oversample_ratio
        UpperCAmelCase_ = importance_sample_ratio
        UpperCAmelCase_ = init_std
        UpperCAmelCase_ = init_xavier_std
        UpperCAmelCase_ = use_auxiliary_loss
        UpperCAmelCase_ = feature_strides
        UpperCAmelCase_ = output_auxiliary_logits
        UpperCAmelCase_ = decoder_layers
        super().__init__(**lowerCamelCase_)

    @classmethod
    def A__(cls: Dict, lowerCamelCase_: PretrainedConfig, **lowerCamelCase_: Optional[Any]) -> Tuple:
        return cls(
            backbone_config=lowerCamelCase_,
            **lowerCamelCase_,
        )

    def A__(self: Union[str, Any]) -> Dict[str, any]:
        UpperCAmelCase_ = copy.deepcopy(self.__dict__)
        UpperCAmelCase_ = self.backbone_config.to_dict()
        UpperCAmelCase_ = self.__class__.model_type
        return output
code_codestyle: 345
style_context:

import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class _snake_case(__snake_case, __snake_case, unittest.TestCase):
    '''simple docstring'''

    A__ = AutoencoderKL
    A__ = "sample"
    A__ = 1e-2

    @property
    def A__(self: List[Any]) -> Union[str, Any]:
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = 3
        UpperCAmelCase_ = (32, 32)
        UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes).to(lowerCamelCase_)
        return {"sample": image}

    @property
    def A__(self: List[str]) -> Tuple:
        return (3, 32, 32)

    @property
    def A__(self: Optional[Any]) -> Any:
        return (3, 32, 32)

    def A__(self: Any) -> Tuple:
        UpperCAmelCase_ = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        UpperCAmelCase_ = self.dummy_input
        return init_dict, inputs_dict

    def A__(self: Optional[Any]) -> int:
        pass

    def A__(self: str) -> Any:
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def A__(self: Union[str, Any]) -> Dict:
        # enable deterministic behavior for gradient checkpointing
        UpperCAmelCase_, UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common()
        UpperCAmelCase_ = self.model_class(**lowerCamelCase_)
        model.to(lowerCamelCase_)
        assert not model.is_gradient_checkpointing and model.training
        UpperCAmelCase_ = model(**lowerCamelCase_).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        UpperCAmelCase_ = torch.randn_like(lowerCamelCase_)
        UpperCAmelCase_ = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        UpperCAmelCase_ = self.model_class(**lowerCamelCase_)
        # clone model
        model_a.load_state_dict(model.state_dict())
        model_a.to(lowerCamelCase_)
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        UpperCAmelCase_ = model_a(**lowerCamelCase_).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        UpperCAmelCase_ = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5)
        UpperCAmelCase_ = dict(model.named_parameters())
        UpperCAmelCase_ = dict(model_a.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5e-5))

    def A__(self: Optional[Any]) -> str:
        UpperCAmelCase_, UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=lowerCamelCase_)
        self.assertIsNotNone(lowerCamelCase_)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(lowerCamelCase_)
        UpperCAmelCase_ = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def A__(self: Optional[int]) -> int:
        UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        UpperCAmelCase_ = model.to(lowerCamelCase_)
        model.eval()
        if torch_device == "mps":
            UpperCAmelCase_ = torch.manual_seed(0)
        else:
            UpperCAmelCase_ = torch.Generator(device=lowerCamelCase_).manual_seed(0)
        UpperCAmelCase_ = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        UpperCAmelCase_ = image.to(lowerCamelCase_)
        with torch.no_grad():
            UpperCAmelCase_ = model(lowerCamelCase_, sample_posterior=lowerCamelCase_, generator=lowerCamelCase_).sample
        UpperCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            UpperCAmelCase_ = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            UpperCAmelCase_ = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            UpperCAmelCase_ = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )
        self.assertTrue(torch_all_close(lowerCamelCase_, lowerCamelCase_, rtol=1e-2))


@slow
class _snake_case(unittest.TestCase):
    '''simple docstring'''

    def A__(self: Any, lowerCamelCase_: List[Any], lowerCamelCase_: Any) -> Optional[Any]:
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_) for s in shape])}.npy"

    def A__(self: Union[str, Any]) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A__(self: List[str], lowerCamelCase_: Optional[int] = 0, lowerCamelCase_: List[Any] = (4, 3, 512, 512), lowerCamelCase_: Optional[Any] = False) -> Optional[int]:
        UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa
        UpperCAmelCase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_, lowerCamelCase_))).to(lowerCamelCase_).to(lowerCamelCase_)
        return image

    def A__(self: List[Any], lowerCamelCase_: List[str] = "CompVis/stable-diffusion-v1-4", lowerCamelCase_: Union[str, Any] = False) -> Any:
        UpperCAmelCase_ = "fp16" if fpaa else None
        UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa
        UpperCAmelCase_ = AutoencoderKL.from_pretrained(
            lowerCamelCase_,
            subfolder="vae",
            torch_dtype=lowerCamelCase_,
            revision=lowerCamelCase_,
        )
        model.to(lowerCamelCase_).eval()
        return model

    def A__(self: Dict, lowerCamelCase_: Union[str, Any] = 0) -> Optional[int]:
        if torch_device == "mps":
            return torch.manual_seed(lowerCamelCase_)
        return torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def A__(self: List[Any], lowerCamelCase_: Optional[Any], lowerCamelCase_: str, lowerCamelCase_: Dict) -> Tuple:
        UpperCAmelCase_ = self.get_sd_vae_model()
        UpperCAmelCase_ = self.get_sd_image(lowerCamelCase_)
        UpperCAmelCase_ = self.get_generator(lowerCamelCase_)
        with torch.no_grad():
            UpperCAmelCase_ = model(lowerCamelCase_, generator=lowerCamelCase_, sample_posterior=lowerCamelCase_).sample
        assert sample.shape == image.shape
        UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(lowerCamelCase_, lowerCamelCase_, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def A__(self: Union[str, Any], lowerCamelCase_: Any, lowerCamelCase_: List[str]) -> Tuple:
        UpperCAmelCase_ = self.get_sd_vae_model(fpaa=lowerCamelCase_)
        UpperCAmelCase_ = self.get_sd_image(lowerCamelCase_, fpaa=lowerCamelCase_)
        UpperCAmelCase_ = self.get_generator(lowerCamelCase_)
        with torch.no_grad():
            UpperCAmelCase_ = model(lowerCamelCase_, generator=lowerCamelCase_, sample_posterior=lowerCamelCase_).sample
        assert sample.shape == image.shape
        UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        UpperCAmelCase_ = torch.tensor(lowerCamelCase_)
        assert torch_all_close(lowerCamelCase_, lowerCamelCase_, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def A__(self: Tuple, lowerCamelCase_: List[Any], lowerCamelCase_: Optional[int], lowerCamelCase_: List[str]) -> Dict:
        UpperCAmelCase_ = self.get_sd_vae_model()
        UpperCAmelCase_ = self.get_sd_image(lowerCamelCase_)
        with torch.no_grad():
            UpperCAmelCase_ = model(lowerCamelCase_).sample
        assert sample.shape == image.shape
        UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(lowerCamelCase_, lowerCamelCase_, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def A__(self: Optional[Any], lowerCamelCase_: Tuple, lowerCamelCase_: str) -> Optional[Any]:
        UpperCAmelCase_ = self.get_sd_vae_model()
        UpperCAmelCase_ = self.get_sd_image(lowerCamelCase_, shape=(3, 4, 64, 64))
        with torch.no_grad():
            UpperCAmelCase_ = model.decode(lowerCamelCase_).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().cpu()
        UpperCAmelCase_ = torch.tensor(lowerCamelCase_)
        assert torch_all_close(lowerCamelCase_, lowerCamelCase_, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def A__(self: str, lowerCamelCase_: List[Any], lowerCamelCase_: Any) -> Optional[Any]:
        UpperCAmelCase_ = self.get_sd_vae_model(fpaa=lowerCamelCase_)
        UpperCAmelCase_ = self.get_sd_image(lowerCamelCase_, shape=(3, 4, 64, 64), fpaa=lowerCamelCase_)
        with torch.no_grad():
            UpperCAmelCase_ = model.decode(lowerCamelCase_).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        UpperCAmelCase_ = torch.tensor(lowerCamelCase_)
        assert torch_all_close(lowerCamelCase_, lowerCamelCase_, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def A__(self: List[Any], lowerCamelCase_: Union[str, Any]) -> int:
        UpperCAmelCase_ = self.get_sd_vae_model(fpaa=lowerCamelCase_)
        UpperCAmelCase_ = self.get_sd_image(lowerCamelCase_, shape=(3, 4, 64, 64), fpaa=lowerCamelCase_)
        with torch.no_grad():
            UpperCAmelCase_ = model.decode(lowerCamelCase_).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            UpperCAmelCase_ = model.decode(lowerCamelCase_).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(lowerCamelCase_, lowerCamelCase_, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def A__(self: Optional[Any], lowerCamelCase_: Dict) -> Union[str, Any]:
        UpperCAmelCase_ = self.get_sd_vae_model()
        UpperCAmelCase_ = self.get_sd_image(lowerCamelCase_, shape=(3, 4, 64, 64))
        with torch.no_grad():
            UpperCAmelCase_ = model.decode(lowerCamelCase_).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            UpperCAmelCase_ = model.decode(lowerCamelCase_).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(lowerCamelCase_, lowerCamelCase_, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def A__(self: Union[str, Any], lowerCamelCase_: Any, lowerCamelCase_: Union[str, Any]) -> Union[str, Any]:
        UpperCAmelCase_ = self.get_sd_vae_model()
        UpperCAmelCase_ = self.get_sd_image(lowerCamelCase_)
        UpperCAmelCase_ = self.get_generator(lowerCamelCase_)
        with torch.no_grad():
            UpperCAmelCase_ = model.encode(lowerCamelCase_).latent_dist
            UpperCAmelCase_ = dist.sample(generator=lowerCamelCase_)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        UpperCAmelCase_ = sample[0, -1, -3:, -3:].flatten().cpu()
        UpperCAmelCase_ = torch.tensor(lowerCamelCase_)
        UpperCAmelCase_ = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(lowerCamelCase_, lowerCamelCase_, atol=lowerCamelCase_)
style_context_codestyle: 345

label: 1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


UpperCamelCase_ = {
    '''configuration_distilbert''': [
        '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''DistilBertConfig''',
        '''DistilBertOnnxConfig''',
    ],
    '''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = ['''DistilBertTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = [
        '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DistilBertForMaskedLM''',
        '''DistilBertForMultipleChoice''',
        '''DistilBertForQuestionAnswering''',
        '''DistilBertForSequenceClassification''',
        '''DistilBertForTokenClassification''',
        '''DistilBertModel''',
        '''DistilBertPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = [
        '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFDistilBertForMaskedLM''',
        '''TFDistilBertForMultipleChoice''',
        '''TFDistilBertForQuestionAnswering''',
        '''TFDistilBertForSequenceClassification''',
        '''TFDistilBertForTokenClassification''',
        '''TFDistilBertMainLayer''',
        '''TFDistilBertModel''',
        '''TFDistilBertPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = [
        '''FlaxDistilBertForMaskedLM''',
        '''FlaxDistilBertForMultipleChoice''',
        '''FlaxDistilBertForQuestionAnswering''',
        '''FlaxDistilBertForSequenceClassification''',
        '''FlaxDistilBertForTokenClassification''',
        '''FlaxDistilBertModel''',
        '''FlaxDistilBertPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
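The sample above follows the lazy-import pattern used throughout the upstream `transformers` package: the import structure is declared up front and a submodule is only loaded on first attribute access. A minimal usage sketch under that assumption (the class names are confirmed by the import lists above; the checkpoint name is illustrative only):

# Hedged sketch: consuming a lazily-populated package module.
from transformers import DistilBertConfig, DistilBertTokenizer

config = DistilBertConfig()  # first access triggers the import of configuration_distilbert
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")  # illustrative checkpoint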
345
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=__snake_case )
class _snake_case ( __snake_case ):
    '''simple docstring'''
    A__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
    A__ : ClassVar[Features] = Features({"audio": Audio()} )
    A__ : ClassVar[Features] = Features({"transcription": Value("string" )} )
    A__ : str = "audio"
    A__ : str = "transcription"

    def A__ ( self: int ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
        if self.audio_column not in features:
            raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] ,lowerCamelCase_ ):
            raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
        UpperCAmelCase_ : Any = copy.deepcopy(self )
        UpperCAmelCase_ : Union[str, Any] = self.input_schema.copy()
        UpperCAmelCase_ : Any = features[self.audio_column]
        UpperCAmelCase_ : Union[str, Any] = input_schema
        return task_template

    @property
    def A__ ( self: List[str] ) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
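This mirrors the `AutomaticSpeechRecognition` task template from the upstream `datasets` library. A rough usage sketch under that assumption (the class and method names below come from the upstream library, not from this obfuscated sample):

# Hedged sketch: aligning the template with a dataset's concrete features.
from datasets import Audio, Features, Value
from datasets.tasks import AutomaticSpeechRecognition

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition(audio_column="audio", transcription_column="transcription")
# align_with_features() deep-copies the template and swaps the dataset's own
# Audio feature (with its sampling rate) into the template's input schema.
template = template.align_with_features(features)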
345
1
import datasets

from .evaluate import evaluate


UpperCamelCase_ = '''\
@inproceedings{Rajpurkar2016SQuAD10,
    title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
    author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
    booktitle={EMNLP},
    year={2016}
}
'''

UpperCamelCase_ = '''
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''

UpperCamelCase_ = '''
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - \'id\': id of the question-answer pair as given in the references (see below)
        - \'prediction_text\': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - \'id\': id of the question-answer pair (see above),
        - \'answers\': a Dict in the SQuAD dataset format
            {
                \'text\': list of possible texts for the answer, as a list of strings
                \'answer_start\': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    \'exact_match\': Exact match (the normalized answer exactly matches the gold answer)
    \'f1\': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
    >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
    >>> squad_metric = datasets.load_metric("squad")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'exact_match\': 100.0, \'f1\': 100.0}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    '''simple docstring'''

    def A__ ( self: int ) -> Dict:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
                    """references""": {
                        """id""": datasets.Value("""string""" ),
                        """answers""": datasets.features.Sequence(
                            {
                                """text""": datasets.Value("""string""" ),
                                """answer_start""": datasets.Value("""int32""" ),
                            } ),
                    },
                } ),
            codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""],
            reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""],
        )

    def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[Any] ) -> List[Any]:
        UpperCAmelCase_ : Any = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        UpperCAmelCase_ : str = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        UpperCAmelCase_ : List[str] = evaluate(dataset=lowerCamelCase_ ,predictions=lowerCamelCase_ )
        return score
345
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = {
    '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}


class _snake_case ( __snake_case ):
    '''simple docstring'''
    A__ : Optional[Any] = "layoutlmv3"

    def __init__(
        self: str,
        lowerCamelCase_: Any=50265,
        lowerCamelCase_: int=768,
        lowerCamelCase_: Any=12,
        lowerCamelCase_: Any=12,
        lowerCamelCase_: List[Any]=3072,
        lowerCamelCase_: str="gelu",
        lowerCamelCase_: List[str]=0.1,
        lowerCamelCase_: Any=0.1,
        lowerCamelCase_: Tuple=512,
        lowerCamelCase_: Union[str, Any]=2,
        lowerCamelCase_: Dict=0.0_2,
        lowerCamelCase_: List[str]=1e-5,
        lowerCamelCase_: int=1,
        lowerCamelCase_: int=0,
        lowerCamelCase_: List[str]=2,
        lowerCamelCase_: Dict=1024,
        lowerCamelCase_: Tuple=128,
        lowerCamelCase_: Tuple=128,
        lowerCamelCase_: Dict=True,
        lowerCamelCase_: Union[str, Any]=32,
        lowerCamelCase_: Union[str, Any]=128,
        lowerCamelCase_: Tuple=64,
        lowerCamelCase_: Tuple=256,
        lowerCamelCase_: List[str]=True,
        lowerCamelCase_: Optional[int]=True,
        lowerCamelCase_: Any=True,
        lowerCamelCase_: Dict=224,
        lowerCamelCase_: Optional[int]=3,
        lowerCamelCase_: Optional[int]=16,
        lowerCamelCase_: Dict=None,
        **lowerCamelCase_: str,
    ) -> List[Any]:
        super().__init__(
            vocab_size=lowerCamelCase_,
            hidden_size=lowerCamelCase_,
            num_hidden_layers=lowerCamelCase_,
            num_attention_heads=lowerCamelCase_,
            intermediate_size=lowerCamelCase_,
            hidden_act=lowerCamelCase_,
            hidden_dropout_prob=lowerCamelCase_,
            attention_probs_dropout_prob=lowerCamelCase_,
            max_position_embeddings=lowerCamelCase_,
            type_vocab_size=lowerCamelCase_,
            initializer_range=lowerCamelCase_,
            layer_norm_eps=lowerCamelCase_,
            pad_token_id=lowerCamelCase_,
            bos_token_id=lowerCamelCase_,
            eos_token_id=lowerCamelCase_,
            **lowerCamelCase_,
        )
        UpperCAmelCase_ : List[Any] = max_ad_position_embeddings
        UpperCAmelCase_ : Optional[int] = coordinate_size
        UpperCAmelCase_ : Optional[int] = shape_size
        UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias
        UpperCAmelCase_ : Optional[int] = rel_pos_bins
        UpperCAmelCase_ : Union[str, Any] = max_rel_pos
        UpperCAmelCase_ : Dict = has_spatial_attention_bias
        UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins
        UpperCAmelCase_ : Tuple = max_rel_ad_pos
        UpperCAmelCase_ : Union[str, Any] = text_embed
        UpperCAmelCase_ : Optional[Any] = visual_embed
        UpperCAmelCase_ : List[str] = input_size
        UpperCAmelCase_ : str = num_channels
        UpperCAmelCase_ : Optional[int] = patch_size
        UpperCAmelCase_ : Tuple = classifier_dropout


class _snake_case ( __snake_case ):
    '''simple docstring'''
    A__ : Optional[Any] = version.parse("1.12" )

    @property
    def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
                    ("""bbox""", {0: """batch""", 1: """sequence"""}),
                    ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ] )
        else:
            return OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                    ("""bbox""", {0: """batch""", 1: """sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
                    ("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
                ] )

    @property
    def A__ ( self: Any ) -> float:
        return 1e-5

    @property
    def A__ ( self: int ) -> int:
        return 12

    def A__ (
        self: List[str],
        lowerCamelCase_: "ProcessorMixin",
        lowerCamelCase_: int = -1,
        lowerCamelCase_: int = -1,
        lowerCamelCase_: bool = False,
        lowerCamelCase_: Optional["TensorType"] = None,
        lowerCamelCase_: int = 3,
        lowerCamelCase_: int = 40,
        lowerCamelCase_: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
            lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
        UpperCAmelCase_ : int = compute_effective_axis_dimension(
            lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ )
        # Generate dummy inputs according to compute batch and sequence
        UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
        UpperCAmelCase_ : Optional[Any] = dict(
            processor(
                lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) )
        return inputs
345
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}


class _snake_case ( __snake_case ):
    '''simple docstring'''
    A__ : Optional[int] = "luke"

    def __init__(
        self: Optional[int],
        lowerCamelCase_: Union[str, Any]=50267,
        lowerCamelCase_: Any=500000,
        lowerCamelCase_: List[str]=768,
        lowerCamelCase_: Tuple=256,
        lowerCamelCase_: Any=12,
        lowerCamelCase_: List[Any]=12,
        lowerCamelCase_: int=3072,
        lowerCamelCase_: Tuple="gelu",
        lowerCamelCase_: Tuple=0.1,
        lowerCamelCase_: Tuple=0.1,
        lowerCamelCase_: str=512,
        lowerCamelCase_: Any=2,
        lowerCamelCase_: str=0.0_2,
        lowerCamelCase_: Any=1e-12,
        lowerCamelCase_: Optional[Any]=True,
        lowerCamelCase_: str=None,
        lowerCamelCase_: Union[str, Any]=1,
        lowerCamelCase_: Tuple=0,
        lowerCamelCase_: Any=2,
        **lowerCamelCase_: Optional[Any],
    ) -> int:
        super().__init__(pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
        UpperCAmelCase_ : List[str] = vocab_size
        UpperCAmelCase_ : Tuple = entity_vocab_size
        UpperCAmelCase_ : str = hidden_size
        UpperCAmelCase_ : List[Any] = entity_emb_size
        UpperCAmelCase_ : List[Any] = num_hidden_layers
        UpperCAmelCase_ : Optional[int] = num_attention_heads
        UpperCAmelCase_ : List[Any] = hidden_act
        UpperCAmelCase_ : int = intermediate_size
        UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
        UpperCAmelCase_ : Dict = attention_probs_dropout_prob
        UpperCAmelCase_ : Tuple = max_position_embeddings
        UpperCAmelCase_ : List[str] = type_vocab_size
        UpperCAmelCase_ : Optional[int] = initializer_range
        UpperCAmelCase_ : int = layer_norm_eps
        UpperCAmelCase_ : int = use_entity_aware_attention
        UpperCAmelCase_ : Dict = classifier_dropout
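A minimal instantiation sketch, assuming the upstream `LukeConfig`/`LukeModel` classes this sample was derived from (the entity-related argument names are confirmed by the attribute assignments above):

# Hedged sketch with upstream class names assumed.
from transformers import LukeConfig, LukeModel

config = LukeConfig(entity_vocab_size=500_000, entity_emb_size=256)
model = LukeModel(config)  # randomly initialised weights, no checkpoint download
print(config.use_entity_aware_attention)  # defaults to True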
345
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def lowerCamelCase_ ( _a : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(_a , _a )


def lowerCamelCase_ ( _a : Any ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = emb.weight.shape
    UpperCAmelCase_ : Tuple = nn.Linear(_a , _a , bias=_a )
    UpperCAmelCase_ : List[Any] = emb.weight.data
    return lin_layer


def lowerCamelCase_ ( _a : Dict ):
    '''simple docstring'''
    UpperCAmelCase_ : int = torch.load(_a , map_location="""cpu""" )
    UpperCAmelCase_ : Dict = Namespace(**checkpoint["""cfg"""]["""model"""] )
    UpperCAmelCase_ : Optional[int] = checkpoint["""model"""]
    remove_ignore_keys_(_a )
    UpperCAmelCase_ : str = state_dict["""decoder.embed_tokens.weight"""].shape[0]

    UpperCAmelCase_ : List[str] = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}

    UpperCAmelCase_ : int = XGLMConfig(
        vocab_size=_a,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="""gelu""",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    UpperCAmelCase_ : List[str] = XGLMForCausalLM(_a )
    UpperCAmelCase_ : Tuple = model.load_state_dict(_a , strict=_a )
    print(_a )
    UpperCAmelCase_ : Optional[Any] = make_linear_from_emb(model.model.embed_tokens )

    return model


if __name__ == "__main__":
    UpperCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    UpperCamelCase_ = parser.parse_args()
    UpperCamelCase_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
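The `__main__` block above already wires the conversion up behind argparse; the same entry point can also be driven directly from Python. A hedged sketch (the two paths are illustrative placeholders, not values from the sample):

# Hedged sketch: programmatic use of the conversion entry point defined above.
model = convert_fairseq_xglm_checkpoint_from_disk("model.pt")  # path to a fairseq checkpoint
model.save_pretrained("xglm-converted/")  # writes config.json + model weights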
345
1
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase_ = logging.get_logger(__name__)

UpperCamelCase_ = {
    '''microsoft/xprophetnet-large-wiki100-cased''': (
        '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
    ),
}


class _snake_case ( __snake_case ):
    '''simple docstring'''
    A__ : Tuple = "xlm-prophetnet"
    A__ : Optional[int] = ["past_key_values"]
    A__ : List[Any] = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self: Union[str, Any],
        lowerCamelCase_: Optional[float] = 0.1,
        lowerCamelCase_: Optional[Union[str, Callable]] = "gelu",
        lowerCamelCase_: Optional[int] = 30522,
        lowerCamelCase_: Optional[int] = 1024,
        lowerCamelCase_: Optional[int] = 4096,
        lowerCamelCase_: Optional[int] = 12,
        lowerCamelCase_: Optional[int] = 16,
        lowerCamelCase_: Optional[int] = 4096,
        lowerCamelCase_: Optional[int] = 12,
        lowerCamelCase_: Optional[int] = 16,
        lowerCamelCase_: Optional[float] = 0.1,
        lowerCamelCase_: Optional[float] = 0.1,
        lowerCamelCase_: Optional[int] = 512,
        lowerCamelCase_: Optional[float] = 0.0_2,
        lowerCamelCase_: Optional[bool] = True,
        lowerCamelCase_: Optional[bool] = True,
        lowerCamelCase_: Optional[int] = 0,
        lowerCamelCase_: Optional[int] = 2,
        lowerCamelCase_: Optional[int] = 32,
        lowerCamelCase_: Optional[int] = 128,
        lowerCamelCase_: Optional[bool] = False,
        lowerCamelCase_: Optional[float] = 0.0,
        lowerCamelCase_: Optional[bool] = True,
        lowerCamelCase_: Optional[int] = 0,
        lowerCamelCase_: Optional[int] = 1,
        lowerCamelCase_: Optional[int] = 2,
        **lowerCamelCase_: Optional[int],
    ) -> Dict:
        UpperCAmelCase_ : List[str] = vocab_size
        UpperCAmelCase_ : Optional[int] = hidden_size
        UpperCAmelCase_ : Tuple = encoder_ffn_dim
        UpperCAmelCase_ : Dict = num_encoder_layers
        UpperCAmelCase_ : List[str] = num_encoder_attention_heads
        UpperCAmelCase_ : Tuple = decoder_ffn_dim
        UpperCAmelCase_ : Optional[Any] = num_decoder_layers
        UpperCAmelCase_ : int = num_decoder_attention_heads
        UpperCAmelCase_ : Optional[int] = max_position_embeddings
        UpperCAmelCase_ : Union[str, Any] = init_std  # Normal(0, this parameter)
        UpperCAmelCase_ : Any = activation_function

        # parameters for xlmprophetnet
        UpperCAmelCase_ : str = ngram
        UpperCAmelCase_ : str = num_buckets
        UpperCAmelCase_ : Dict = relative_max_distance
        UpperCAmelCase_ : List[Any] = disable_ngram_loss
        UpperCAmelCase_ : int = eps

        # 3 Types of Dropout
        UpperCAmelCase_ : Any = attention_dropout
        UpperCAmelCase_ : Dict = activation_dropout
        UpperCAmelCase_ : int = dropout

        UpperCAmelCase_ : List[str] = use_cache

        super().__init__(
            pad_token_id=lowerCamelCase_,
            bos_token_id=lowerCamelCase_,
            eos_token_id=lowerCamelCase_,
            is_encoder_decoder=lowerCamelCase_,
            add_cross_attention=lowerCamelCase_,
            decoder_start_token_id=lowerCamelCase_,
            **lowerCamelCase_,
        )

    @property
    def A__ ( self: Optional[Any] ) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def A__ ( self: Dict ,lowerCamelCase_: Optional[int] ) -> int:
        raise NotImplementedError(
            """This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
            """ `num_decoder_layers`.""" )
345
import collections
import inspect
import unittest

from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        FocalNetBackbone,
        FocalNetForImageClassification,
        FocalNetForMaskedImageModeling,
        FocalNetModel,
    )
    from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class _snake_case :
    '''simple docstring'''

    def __init__(
        self: List[Any],
        lowerCamelCase_: Tuple,
        lowerCamelCase_: Union[str, Any]=13,
        lowerCamelCase_: Optional[int]=32,
        lowerCamelCase_: List[str]=2,
        lowerCamelCase_: Optional[Any]=3,
        lowerCamelCase_: int=16,
        lowerCamelCase_: Optional[Any]=[32, 64, 128],
        lowerCamelCase_: Optional[int]=[1, 2, 1],
        lowerCamelCase_: Union[str, Any]=[2, 2, 4],
        lowerCamelCase_: int=2,
        lowerCamelCase_: List[str]=2.0,
        lowerCamelCase_: List[Any]=True,
        lowerCamelCase_: List[str]=0.0,
        lowerCamelCase_: List[str]=0.0,
        lowerCamelCase_: Optional[int]=0.1,
        lowerCamelCase_: Optional[int]="gelu",
        lowerCamelCase_: Any=False,
        lowerCamelCase_: Dict=True,
        lowerCamelCase_: Union[str, Any]=0.0_2,
        lowerCamelCase_: int=1e-5,
        lowerCamelCase_: int=True,
        lowerCamelCase_: Tuple=None,
        lowerCamelCase_: str=True,
        lowerCamelCase_: Dict=10,
        lowerCamelCase_: str=8,
        lowerCamelCase_: Union[str, Any]=["stage1", "stage2"],
        lowerCamelCase_: Optional[Any]=[1, 2],
    ) -> str:
        UpperCAmelCase_ : List[Any] = parent
        UpperCAmelCase_ : Tuple = batch_size
        UpperCAmelCase_ : Any = image_size
        UpperCAmelCase_ : str = patch_size
        UpperCAmelCase_ : List[str] = num_channels
        UpperCAmelCase_ : Dict = embed_dim
        UpperCAmelCase_ : Dict = hidden_sizes
        UpperCAmelCase_ : str = depths
        UpperCAmelCase_ : int = num_heads
        UpperCAmelCase_ : List[Any] = window_size
        UpperCAmelCase_ : Union[str, Any] = mlp_ratio
        UpperCAmelCase_ : int = qkv_bias
        UpperCAmelCase_ : List[str] = hidden_dropout_prob
        UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
        UpperCAmelCase_ : Optional[int] = drop_path_rate
        UpperCAmelCase_ : Union[str, Any] = hidden_act
        UpperCAmelCase_ : List[Any] = use_absolute_embeddings
        UpperCAmelCase_ : List[Any] = patch_norm
        UpperCAmelCase_ : int = layer_norm_eps
        UpperCAmelCase_ : int = initializer_range
        UpperCAmelCase_ : Optional[Any] = is_training
        UpperCAmelCase_ : Optional[Any] = scope
        UpperCAmelCase_ : Union[str, Any] = use_labels
        UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size
        UpperCAmelCase_ : Optional[int] = encoder_stride
        UpperCAmelCase_ : Optional[int] = out_features
        UpperCAmelCase_ : Optional[int] = out_indices

    def A__ ( self: Union[str, Any] ) -> List[Any]:
        UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        UpperCAmelCase_ : int = None
        if self.use_labels:
            UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )

        UpperCAmelCase_ : Any = self.get_config()

        return config, pixel_values, labels

    def A__ ( self: List[Any] ) -> Tuple:
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]:
        UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ )

        UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )

        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )

    def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
        UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] )

        # verify backbone works with out_features=None
        UpperCAmelCase_ : Union[str, Any] = None
        UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) ,1 )
        self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )

    def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]:
        UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )
        self.parent.assertEqual(
            result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        UpperCAmelCase_ : int = 1
        UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()

        UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )
        self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )

    def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int:
        UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
        UpperCAmelCase_ : int = FocalNetForImageClassification(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        UpperCAmelCase_ : List[Any] = 1
        UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()

        UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_ : List[str] = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )

    def A__ ( self: Union[str, Any] ) -> Optional[int]:
        UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
        UpperCAmelCase_ : int = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
    '''simple docstring'''
    A__ : List[Any] = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    A__ : Union[str, Any] = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )

    A__ : Optional[Any] = False
    A__ : Any = False
    A__ : List[str] = False
    A__ : Any = False
    A__ : Any = False

    def A__ ( self: List[str] ) -> Tuple:
        UpperCAmelCase_ : Dict = FocalNetModelTester(self )
        UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ )

    def A__ ( self: List[str] ) -> int:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A__ ( self: List[str] ) -> Union[str, Any]:
        return

    def A__ ( self: str ) -> List[str]:
        UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )

    def A__ ( self: Tuple ) -> int:
        UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowerCamelCase_ )

    def A__ ( self: Dict ) -> List[str]:
        UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )

    def A__ ( self: int ) -> int:
        UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )

    @unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
    def A__ ( self: int ) -> Dict:
        pass

    @unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
    def A__ ( self: Optional[Any] ) -> Optional[Any]:
        pass

    def A__ ( self: Optional[Any] ) -> List[str]:
        UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )

    def A__ ( self: str ) -> Optional[int]:
        UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
            UpperCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : Any = [*signature.parameters.keys()]

            UpperCAmelCase_ : List[str] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )

    def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]:
        UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()

        with torch.no_grad():
            UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )

        UpperCAmelCase_ : Any = outputs.hidden_states

        UpperCAmelCase_ : List[Any] = getattr(
            self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )

        # FocalNet has a different seq_length
        UpperCAmelCase_ : int = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)

        UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states
        self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape
        UpperCAmelCase_ : List[Any] = (
            reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)

    def A__ ( self: Any ) -> List[Any]:
        UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : Optional[int] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            UpperCAmelCase_ : str = True
            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_ : Union[str, Any] = True

            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )

    def A__ ( self: List[str] ) -> str:
        UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : Tuple = 3
        UpperCAmelCase_ : Tuple = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        UpperCAmelCase_ : Union[str, Any] = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            UpperCAmelCase_ : Optional[Any] = True
            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_ : Optional[int] = True
            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )

    @slow
    def A__ ( self: Optional[int] ) -> Optional[Any]:
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )

    def A__ ( self: Optional[Any] ) -> Optional[int]:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ )
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)


@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def A__ ( self: Optional[int] ) -> str:
        # TODO update organization
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None

    @slow
    def A__ ( self: List[Any] ) -> List[str]:
        UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ )
        UpperCAmelCase_ : Tuple = self.default_image_processor

        UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )

        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ : Dict = model(**lowerCamelCase_ )

        # verify the logits
        UpperCAmelCase_ : str = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
        UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
        self.assertTrue(outputs.logits.argmax(dim=-1 ).item() ,281 )


@require_torch
class _snake_case ( __snake_case , unittest.TestCase ):
    '''simple docstring'''
    A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else ()
    A__ : int = FocalNetConfig

    A__ : List[str] = False

    def A__ ( self: Any ) -> Optional[int]:
        UpperCAmelCase_ : str = FocalNetModelTester(self )
345
1
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class _snake_case ( __snake_case ):
    '''simple docstring'''

    def __init__( self: Tuple ,lowerCamelCase_: int ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: int=True ,lowerCamelCase_: Any=None ,**lowerCamelCase_: List[str] ) -> Any:
        UpperCAmelCase_ : Dict = parent
        UpperCAmelCase_ : List[str] = config_class
        UpperCAmelCase_ : str = has_text_modality
        UpperCAmelCase_ : Union[str, Any] = kwargs
        UpperCAmelCase_ : int = common_properties

    def A__ ( self: Union[str, Any] ) -> List[str]:
        UpperCAmelCase_ : Optional[Any] = self.config_class(**self.inputs_dict )
        UpperCAmelCase_ : List[str] = (
            ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["""vocab_size"""] )

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(lowerCamelCase_ ,lowerCamelCase_ ) ,msg=F'''`{prop}` does not exist''' )

        # Test that config has the common properties as setter
        for idx, name in enumerate(lowerCamelCase_ ):
            try:
                setattr(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
                self.parent.assertEqual(
                    getattr(lowerCamelCase_ ,lowerCamelCase_ ) ,lowerCamelCase_ ,msg=F'''`{name} value {idx} expected, but was {getattr(lowerCamelCase_ ,lowerCamelCase_ )}''' )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(lowerCamelCase_ ):
            try:
                UpperCAmelCase_ : Optional[Any] = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(lowerCamelCase_ ,lowerCamelCase_ ) ,lowerCamelCase_ ,msg=F'''`{name} value {idx} expected, but was {getattr(lowerCamelCase_ ,lowerCamelCase_ )}''' )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def A__ ( self: Optional[Any] ) -> Union[str, Any]:
        UpperCAmelCase_ : Any = self.config_class(**self.inputs_dict )
        UpperCAmelCase_ : Union[str, Any] = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] ,lowerCamelCase_ )

    def A__ ( self: Tuple ) -> Tuple:
        UpperCAmelCase_ : str = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""config.json""" )
            config_first.to_json_file(lowerCamelCase_ )
            UpperCAmelCase_ : Any = self.config_class.from_json_file(lowerCamelCase_ )

        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def A__ ( self: List[Any] ) -> Dict:
        UpperCAmelCase_ : List[Any] = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(lowerCamelCase_ )
            UpperCAmelCase_ : Optional[int] = self.config_class.from_pretrained(lowerCamelCase_ )

        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def A__ ( self: List[Any] ) -> Tuple:
        UpperCAmelCase_ : Any = self.config_class(**self.inputs_dict )
        UpperCAmelCase_ : Optional[Any] = """test"""

        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCAmelCase_ : str = os.path.join(lowerCamelCase_ ,lowerCamelCase_ )
            config_first.save_pretrained(lowerCamelCase_ )
            UpperCAmelCase_ : List[str] = self.config_class.from_pretrained(lowerCamelCase_ ,subfolder=lowerCamelCase_ )

        self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )

    def A__ ( self: int ) -> List[Any]:
        UpperCAmelCase_ : Any = self.config_class(**self.inputs_dict ,num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) ,5 )
        self.parent.assertEqual(len(config.labelaid ) ,5 )

        UpperCAmelCase_ : int = 3
        self.parent.assertEqual(len(config.idalabel ) ,3 )
        self.parent.assertEqual(len(config.labelaid ) ,3 )

    def A__ ( self: List[str] ) -> int:
        if self.config_class.is_composition:
            return
        UpperCAmelCase_ : str = self.config_class()
        self.parent.assertIsNotNone(lowerCamelCase_ )

    def A__ ( self: List[str] ) -> List[Any]:
        UpperCAmelCase_ : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
        UpperCAmelCase_ : Tuple = self.config_class(**lowerCamelCase_ )
        UpperCAmelCase_ : Optional[int] = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
            elif getattr(lowerCamelCase_ ,lowerCamelCase_ ) != value:
                wrong_values.append((key, getattr(lowerCamelCase_ ,lowerCamelCase_ ), value) )

        if len(lowerCamelCase_ ) > 0:
            UpperCAmelCase_ : Optional[Any] = """\n""".join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
            raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' )

    def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
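The model test files earlier in this dump show how this tester is wired into a concrete test case. A sketch mirroring that pattern, assuming the upstream name `run_common_tests` for the final method above (which simply chains all the individual checks); the test class and config choice are illustrative:

# Hedged sketch mirroring the setUp()/test_config() pattern used in the test files above.
import unittest

from transformers import DistilBertConfig


class DistilBertConfigTest(unittest.TestCase):  # illustrative test class
    def setUp(self):
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()  # runs every check defined above in sequence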
345
import collections
import inspect
import unittest

from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
    from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class _snake_case :
    '''simple docstring'''

    def __init__(
        self: Tuple,
        lowerCamelCase_: List[str],
        lowerCamelCase_: int=13,
        lowerCamelCase_: int=32,
        lowerCamelCase_: Optional[int]=2,
        lowerCamelCase_: Any=3,
        lowerCamelCase_: str=16,
        lowerCamelCase_: Optional[Any]=[1, 2, 1],
        lowerCamelCase_: Tuple=[2, 2, 4],
        lowerCamelCase_: int=2,
        lowerCamelCase_: List[Any]=2.0,
        lowerCamelCase_: str=True,
        lowerCamelCase_: Optional[int]=0.0,
        lowerCamelCase_: List[Any]=0.0,
        lowerCamelCase_: List[str]=0.1,
        lowerCamelCase_: Tuple="gelu",
        lowerCamelCase_: Union[str, Any]=False,
        lowerCamelCase_: Union[str, Any]=True,
        lowerCamelCase_: Optional[int]=0.0_2,
        lowerCamelCase_: int=1e-5,
        lowerCamelCase_: Optional[int]=True,
        lowerCamelCase_: Union[str, Any]=None,
        lowerCamelCase_: Union[str, Any]=True,
        lowerCamelCase_: Optional[int]=10,
        lowerCamelCase_: Tuple=8,
    ) -> List[Any]:
        UpperCAmelCase_ : List[str] = parent
        UpperCAmelCase_ : int = batch_size
        UpperCAmelCase_ : int = image_size
        UpperCAmelCase_ : Union[str, Any] = patch_size
        UpperCAmelCase_ : Optional[Any] = num_channels
        UpperCAmelCase_ : int = embed_dim
        UpperCAmelCase_ : Union[str, Any] = depths
        UpperCAmelCase_ : List[str] = num_heads
        UpperCAmelCase_ : int = window_size
        UpperCAmelCase_ : List[str] = mlp_ratio
        UpperCAmelCase_ : Tuple = qkv_bias
        UpperCAmelCase_ : Tuple = hidden_dropout_prob
        UpperCAmelCase_ : str = attention_probs_dropout_prob
        UpperCAmelCase_ : Tuple = drop_path_rate
        UpperCAmelCase_ : List[str] = hidden_act
        UpperCAmelCase_ : int = use_absolute_embeddings
        UpperCAmelCase_ : Any = patch_norm
        UpperCAmelCase_ : Optional[int] = layer_norm_eps
        UpperCAmelCase_ : Tuple = initializer_range
        UpperCAmelCase_ : Optional[Any] = is_training
        UpperCAmelCase_ : Dict = scope
        UpperCAmelCase_ : int = use_labels
        UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
        UpperCAmelCase_ : List[str] = encoder_stride

    def A__ ( self: Any ) -> int:
        UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        UpperCAmelCase_ : List[Any] = None
        if self.use_labels:
            UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )

        UpperCAmelCase_ : str = self.get_config()

        return config, pixel_values, labels

    def A__ ( self: List[Any] ) -> Union[str, Any]:
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str:
        UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ )

        UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )

        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )

    def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int:
        UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        UpperCAmelCase_ : str = 1
        UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()

        UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_ : int = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )

    def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int:
        UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size
        UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )

    def A__ ( self: str ) -> Union[str, Any]:
        UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
        UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_torch
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
    '''simple docstring'''
    A__ : Tuple = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    A__ : Optional[Any] = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    A__ : List[Any] = False
    A__ : Tuple = False
    A__ : int = False
    A__ : Union[str, Any] = False

    def A__ ( self: List[str] ) -> Optional[Any]:
        UpperCAmelCase_ : Any = SwinvaModelTester(self )
        UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 )

    def A__ ( self: Optional[int] ) -> List[Any]:
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A__ ( self: Any ) -> Dict:
        UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )

    @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
    def A__ ( self: int ) -> Dict:
        pass

    @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
    def A__ ( self: Tuple ) -> List[str]:
        pass

    def A__ ( self: str ) -> List[Any]:
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            UpperCAmelCase_ : Tuple = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )

    def A__ ( self: Optional[Any] ) -> Optional[int]:
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ )
            UpperCAmelCase_ : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : int = [*signature.parameters.keys()]

            UpperCAmelCase_ : Tuple = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )

    def A__ ( self: Union[str, Any] ) -> Optional[Any]:
        UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : Any = True

        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Optional[Any] = True
            UpperCAmelCase_ : Union[str, Any] = False
            UpperCAmelCase_ : str = True
            UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ )
            model.to(lowerCamelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
            UpperCAmelCase_ : Optional[Any] = outputs.attentions
            UpperCAmelCase_ : List[str] = len(self.model_tester.depths )
            self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            UpperCAmelCase_ : str = True
            UpperCAmelCase_ : Optional[Any] = config.window_size**2
            UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ )
            model.to(lowerCamelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )
            UpperCAmelCase_ : List[Any] = outputs.attentions
            self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )

            self.assertListEqual(
                list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
            UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ )

            # Check attention is always last and order is fine
            UpperCAmelCase_ : Tuple = True
            UpperCAmelCase_ : List[Any] = True
            UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ )
            model.to(lowerCamelCase_ )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )

            if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
                UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                UpperCAmelCase_ : List[str] = 2
            self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) )

            UpperCAmelCase_ : Any = outputs.attentions

            self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )

            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)

    def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]:
        UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()

        with torch.no_grad():
            UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) )

        UpperCAmelCase_ : List[str] = outputs.hidden_states

        UpperCAmelCase_ : Optional[Any] = getattr(
            self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )

        # Swinv2 has a different seq_length
        UpperCAmelCase_ : Optional[Any] = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)

        UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states
        self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape
        UpperCAmelCase_ : Optional[Any] = (
            reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)

    def A__ ( self: Any ) -> int:
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : Dict = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Any = True
            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_ : str = True

            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )

    def A__ ( self: List[str] ) -> Dict:
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : Union[str, Any] = 3
        UpperCAmelCase_ : Optional[int] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        UpperCAmelCase_ : List[str] = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Optional[Any] = True
            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_ : List[str] = True
            self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) )

    def A__ ( self: Optional[int] ) -> str:
        UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )

    def A__ ( self: Union[str, Any] ) -> Dict:
        UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )

    @slow
    def A__ ( self: str ) -> Tuple:
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )

    def A__ ( self: Any ) -> int:
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ )
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)


@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def A__ ( self: Dict ) -> Optional[Any]:
        return (
            AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
            if is_vision_available()
            else None
        )

    @slow
    def A__ ( self: str ) -> List[Any]:
        UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
            lowerCamelCase_ )
        UpperCAmelCase_ : Any = self.default_image_processor

        UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )

        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ )

        # verify the logits
        UpperCAmelCase_ : Dict = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
        UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
345
1
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


UpperCamelCase_ = logging.get_logger(__name__)  # pylint: disable=invalid-name


class _snake_case ( __snake_case ):
    '''simple docstring'''

    def __init__( self: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Optional[int] ) -> Any:
        super().__init__()
        self.register_modules(unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ )

    @torch.no_grad()
    def __call__(
        self: Dict,
        lowerCamelCase_: int = 1,
        lowerCamelCase_: int = 100,
        lowerCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        lowerCamelCase_: Optional[float] = None,
        lowerCamelCase_: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            UpperCAmelCase_ : int = self.unet.config.sample_size / self.unet.config.sample_rate

        UpperCAmelCase_ : Tuple = audio_length_in_s * self.unet.config.sample_rate

        UpperCAmelCase_ : Tuple = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )

        UpperCAmelCase_ : List[str] = int(lowerCamelCase_ )
        if sample_size % down_scale_factor != 0:
            UpperCAmelCase_ : Dict = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                """ process.""" )
        UpperCAmelCase_ : Dict = int(lowerCamelCase_ )

        UpperCAmelCase_ : List[str] = next(iter(self.unet.parameters() ) ).dtype
        UpperCAmelCase_ : int = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )

        UpperCAmelCase_ : Dict = randn_tensor(lowerCamelCase_ ,generator=lowerCamelCase_ ,device=self.device ,dtype=lowerCamelCase_ )

        # set step values
        self.scheduler.set_timesteps(lowerCamelCase_ ,device=audio.device )
        UpperCAmelCase_ : Union[str, Any] = self.scheduler.timesteps.to(lowerCamelCase_ )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            UpperCAmelCase_ : str = self.unet(lowerCamelCase_ ,lowerCamelCase_ ).sample

            # 2. compute previous sample: x_t -> x_t-1
            UpperCAmelCase_ : Union[str, Any] = self.scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ).prev_sample

        UpperCAmelCase_ : Any = audio.clamp(-1 ,1 ).float().cpu().numpy()

        UpperCAmelCase_ : List[str] = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=lowerCamelCase_ )
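This matches the structure of the upstream `DanceDiffusionPipeline` in `diffusers`. A hedged usage sketch under that assumption (the pipeline class and checkpoint names are assumptions based on the upstream library, not values taken from the sample):

# Hedged sketch: generating a short unconditional audio clip.
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
audio = output.audios[0]  # numpy array of shape (channels, samples)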
345
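# A hedged sketch of the length bookkeeping in the audio pipeline above: the
# requested duration is converted to samples and rounded *up* to a multiple of
# the U-Net's total downsampling factor (2 ** number of up blocks); after
# denoising, the waveform is cut back to the original length. The sample rate
# and block count below are illustrative, not taken from a real checkpoint.
def padded_sample_size(audio_length_in_s: float, sample_rate: int, num_up_blocks: int) -> tuple[int, int]:
    down_scale_factor = 2 ** num_up_blocks
    original = int(audio_length_in_s * sample_rate)
    if original < 3 * down_scale_factor:
        raise ValueError(f"{audio_length_in_s}s is too short for this model")
    padded = original
    if original % down_scale_factor != 0:
        padded = (original // down_scale_factor + 1) * down_scale_factor
    return original, padded


original, padded = padded_sample_size(1.0, sample_rate=22_050, num_up_blocks=5)
assert (original, padded) == (22_050, 22_080)  # 22050 is rounded up to a multiple of 32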
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED UpperCamelCase_ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } UpperCamelCase_ = { '''allenai/led-base-16384''': 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : int = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCAmelCase_ : Dict = bs[:] UpperCAmelCase_ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(_a ) cs.append(2**8 + n ) n += 1 UpperCAmelCase_ : Any = [chr(_a ) for n in cs] return dict(zip(_a , _a ) ) def lowerCamelCase_ ( _a : List[str] ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = set() UpperCAmelCase_ : List[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase_ : Optional[int] = char return pairs class _snake_case ( __snake_case ): '''simple docstring''' A__ : str = VOCAB_FILES_NAMES A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Optional[int] = ["input_ids", "attention_mask"] def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any: UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,) with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle: UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : Any = errors # how to handle errors in decoding UpperCAmelCase_ : int = bytes_to_unicode() UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle: UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1] UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges] UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) ) UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def A__ ( self: List[str] ) -> List[str]: return len(self.encoder ) def A__ ( self: Any ) -> Union[str, Any]: return dict(self.encoder ,**self.added_tokens_encoder ) def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]: if token in self.cache: return self.cache[token] UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : List[str] = 0 while i < len(lowerCamelCase_ ): try: UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase_ : Union[str, Any] = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ ) UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = word return word def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]: UpperCAmelCase_ : str = [] for token in re.findall(self.pat ,lowerCamelCase_ ): UpperCAmelCase_ : List[Any] = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]: return 
self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) ) def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]: return self.decoder.get(lowerCamelCase_ ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]: UpperCAmelCase_ : str = """""".join(lowerCamelCase_ ) UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : List[Any] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase_ : List[str] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCAmelCase_ : str = 0 with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) UpperCAmelCase_ : Tuple = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] UpperCAmelCase_ : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Optional[Any] = [self.sep_token_id] UpperCAmelCase_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCAmelCase_ : Dict = """ """ + text return (text, kwargs) def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: 
Optional[bool] = None ,) -> dict: UpperCAmelCase_ : Optional[int] = super()._pad( encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,) # Load from model defaults if return_attention_mask is None: UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCAmelCase_ : str = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
345
1
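# A minimal sketch of the two byte-level BPE helpers defined above: get_pairs
# enumerates adjacent symbol pairs, and merge_once picks the lowest-ranked
# pair and fuses every occurrence of it. The ranks below are invented for the
# demo; the real tokenizer loads them from merges.txt.
def get_pairs(word: tuple[str, ...]) -> set[tuple[str, str]]:
    return {(a, b) for a, b in zip(word, word[1:])}


def merge_once(word: tuple[str, ...], ranks: dict[tuple[str, str], int]) -> tuple[str, ...]:
    best = min(get_pairs(word), key=lambda pair: ranks.get(pair, float("inf")))
    if best not in ranks:
        return word  # no known merge applies
    first, second = best
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return tuple(merged)


ranks = {("l", "o"): 0, ("lo", "w"): 1}
assert merge_once(("l", "o", "w"), ranks) == ("lo", "w")
assert merge_once(("lo", "w"), ranks) == ("low",)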
import logging from transformers.configuration_utils import PretrainedConfig UpperCamelCase_ = logging.getLogger(__name__) class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = "masked_bert" def __init__( self: Optional[Any] ,lowerCamelCase_: Union[str, Any]=30522 ,lowerCamelCase_: Union[str, Any]=768 ,lowerCamelCase_: List[Any]=12 ,lowerCamelCase_: List[Any]=12 ,lowerCamelCase_: Tuple=3072 ,lowerCamelCase_: Optional[Any]="gelu" ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Union[str, Any]=512 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: Any=1e-12 ,lowerCamelCase_: Optional[Any]=0 ,lowerCamelCase_: Dict="topK" ,lowerCamelCase_: str="constant" ,lowerCamelCase_: Optional[int]=0.0 ,**lowerCamelCase_: List[str] ,) -> str: super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : List[Any] = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : Union[str, Any] = num_attention_heads UpperCAmelCase_ : Tuple = hidden_act UpperCAmelCase_ : Any = intermediate_size UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : int = max_position_embeddings UpperCAmelCase_ : List[str] = type_vocab_size UpperCAmelCase_ : List[str] = initializer_range UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : int = pruning_method UpperCAmelCase_ : Optional[Any] = mask_init UpperCAmelCase_ : Tuple = mask_scale
345
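# A hedged sketch of the config-subclass pattern above: declare a model_type,
# take hyperparameters in __init__, and forward shared kwargs such as
# pad_token_id to the base class. Assumes transformers is installed; the
# class name and pruning fields are illustrative.
from transformers import PretrainedConfig


class TinyMaskedConfig(PretrainedConfig):
    model_type = "tiny_masked"

    def __init__(self, hidden_size: int = 768, pruning_method: str = "topK",
                 mask_init: str = "constant", mask_scale: float = 0.0, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.pruning_method = pruning_method  # how weights are selected for pruning
        self.mask_init = mask_init            # initialization scheme for mask scores
        self.mask_scale = mask_scale          # scale applied to the initial scores


config = TinyMaskedConfig(pad_token_id=0)
assert config.to_dict()["pruning_method"] == "topK"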
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Union[str, Any] ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Optional[Any] = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ ) return image @property def A__ ( self: List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : int = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) return model @property def A__ ( self: str ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) return model @property def A__ ( self: Optional[int] ) -> int: torch.manual_seed(0 ) UpperCAmelCase_ : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModel(lowerCamelCase_ ) @property def A__ ( self: Tuple ) -> Tuple: def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ): class _snake_case : '''simple docstring''' def __init__( self: List[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = torch.ones([0] ) def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int: self.pixel_values.to(lowerCamelCase_ ) return self return Out() return extract def A__ ( self: Union[str, Any] ) -> Tuple: UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : int = self.dummy_cond_unet UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : str = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : str = 
torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : List[Any] = output.images UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Dict = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : int = image[0, -3:, -3:, -1] UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.dummy_cond_unet UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: str ) -> Dict: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert isinstance(pipe.scheduler ,lowerCamelCase_ ) assert pipe.safety_checker is None UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ ) # sanity 
check that the pipeline still works assert pipe.safety_checker is None UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : Tuple = self.dummy_cond_unet UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 UpperCAmelCase_ : Optional[Any] = unet.half() UpperCAmelCase_ : Optional[int] = vae.half() UpperCAmelCase_ : int = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ) -> List[Any]: UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : str = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) UpperCAmelCase_ : Optional[int] = 4003660346 UpperCAmelCase_ : int = 7 # without safety guidance (sld_guidance_scale = 0) UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Optional[int] = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Any = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) 
UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity""" UpperCAmelCase_ : List[Any] = 2734971755 UpperCAmelCase_ : Optional[Any] = 7 UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.""" """ leyendecker""" ) UpperCAmelCase_ : Optional[Any] = 1044355234 UpperCAmelCase_ : List[str] = 12 UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Any = output.images UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : List[str] = output.images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
345
1
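# A sketch of the numeric regression pattern these pipeline tests rely on:
# seed a generator, run twice, and compare a small corner slice of the output
# against a reference with a max-absolute-difference tolerance. A random
# tensor stands in for real pipeline output here.
import numpy as np
import torch


def corner_slice(images: np.ndarray) -> np.ndarray:
    # bottom-right 3x3 patch of the last channel, as in the tests above
    return images[0, -3:, -3:, -1].flatten()


torch.manual_seed(0)
image_a = torch.rand(1, 64, 64, 3).numpy()
torch.manual_seed(0)
image_b = torch.rand(1, 64, 64, 3).numpy()  # same seed, same output

assert np.abs(corner_slice(image_a) - corner_slice(image_b)).max() < 1e-2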
import copy import random from transformers import CLIPTokenizer class _snake_case ( __snake_case ): '''simple docstring''' def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> str: super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = {} def A__ ( self: str ,lowerCamelCase_: str ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = super().add_tokens(lowerCamelCase_ ,*lowerCamelCase_ ,**lowerCamelCase_ ) if num_added_tokens == 0: raise ValueError( F'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' """ `placeholder_token` that is not already in the tokenizer.""" ) def A__ ( self: List[Any] ,lowerCamelCase_: Dict ,*lowerCamelCase_: List[str] ,lowerCamelCase_: List[Any]=1 ,**lowerCamelCase_: List[str] ) -> List[str]: UpperCAmelCase_ : str = [] if num_vec_per_token == 1: self.try_adding_tokens(lowerCamelCase_ ,*lowerCamelCase_ ,**lowerCamelCase_ ) output.append(lowerCamelCase_ ) else: UpperCAmelCase_ : Optional[int] = [] for i in range(lowerCamelCase_ ): UpperCAmelCase_ : int = placeholder_token + F'''_{i}''' self.try_adding_tokens(lowerCamelCase_ ,*lowerCamelCase_ ,**lowerCamelCase_ ) output.append(lowerCamelCase_ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'''The tokenizer already has placeholder token {token} that can get confused with''' F''' {placeholder_token}keep placeholder tokens independent''' ) UpperCAmelCase_ : Optional[int] = output def A__ ( self: int ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=False ,lowerCamelCase_: Optional[Any]=1.0 ) -> Tuple: if isinstance(lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : List[Any] = [] for i in range(len(lowerCamelCase_ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] ,vector_shuffle=lowerCamelCase_ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: UpperCAmelCase_ : Tuple = self.token_map[placeholder_token] UpperCAmelCase_ : List[str] = tokens[: 1 + int(len(lowerCamelCase_ ) * prop_tokens_to_load )] if vector_shuffle: UpperCAmelCase_ : Union[str, Any] = copy.copy(lowerCamelCase_ ) random.shuffle(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = text.replace(lowerCamelCase_ ,""" """.join(lowerCamelCase_ ) ) return text def __call__( self: Any ,lowerCamelCase_: str ,*lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any]=False ,lowerCamelCase_: Union[str, Any]=1.0 ,**lowerCamelCase_: List[Any] ) -> Optional[Any]: return super().__call__( self.replace_placeholder_tokens_in_text( lowerCamelCase_ ,vector_shuffle=lowerCamelCase_ ,prop_tokens_to_load=lowerCamelCase_ ) ,*lowerCamelCase_ ,**lowerCamelCase_ ,) def A__ ( self: Tuple ,lowerCamelCase_: Any ,*lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[Any]=False ,lowerCamelCase_: str=1.0 ,**lowerCamelCase_: Union[str, Any] ) -> str: return super().encode( self.replace_placeholder_tokens_in_text( lowerCamelCase_ ,vector_shuffle=lowerCamelCase_ ,prop_tokens_to_load=lowerCamelCase_ ) ,*lowerCamelCase_ ,**lowerCamelCase_ ,)
345
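# A minimal sketch of the multi-vector placeholder trick implemented above:
# one pseudo-word expands into several real tokens, optionally shuffled before
# substitution. Plain string handling only, no CLIP dependency; the token
# names are made up.
import random


def expand_placeholder(token: str, num_vec_per_token: int) -> list[str]:
    if num_vec_per_token == 1:
        return [token]
    return [f"{token}_{i}" for i in range(num_vec_per_token)]


def replace_placeholders(text: str, token_map: dict[str, list[str]], vector_shuffle: bool = False) -> str:
    for placeholder, tokens in token_map.items():
        if placeholder in text:
            tokens = list(tokens)
            if vector_shuffle:
                random.shuffle(tokens)
            text = text.replace(placeholder, " ".join(tokens))
    return text


token_map = {"<cat-toy>": expand_placeholder("<cat-toy>", 3)}
assert replace_placeholders("a photo of <cat-toy>", token_map) == "a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2"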
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _snake_case : '''simple docstring''' def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]: UpperCAmelCase_ : Any = parent UpperCAmelCase_ : List[Any] = batch_size UpperCAmelCase_ : Union[str, Any] = seq_length UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Dict = use_input_mask UpperCAmelCase_ : Any = use_token_type_ids UpperCAmelCase_ : Tuple = use_labels UpperCAmelCase_ : List[Any] = vocab_size UpperCAmelCase_ : str = hidden_size UpperCAmelCase_ : List[str] = embedding_size UpperCAmelCase_ : List[Any] = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : Tuple = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : List[str] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : List[str] = type_vocab_size UpperCAmelCase_ : Any = type_sequence_label_size UpperCAmelCase_ : Optional[Any] = initializer_range UpperCAmelCase_ : Optional[int] = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : List[str] = scope def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : List[str] = None if self.use_input_mask: UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Dict = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) UpperCAmelCase_ : int = None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase_ : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def 
A__ ( self: Any ) -> Dict: return MobileBertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,) def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int: UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int: UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int: UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]: UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ ) 
model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str: UpperCAmelCase_ : Optional[Any] = self.num_labels UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any: UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = self.num_choices UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : Optional[int] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def A__ ( self: List[str] ) -> str: UpperCAmelCase_ : str = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Union[str, Any] = config_and_inputs UpperCAmelCase_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Dict = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) A__ : List[str] = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, 
"question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) A__ : List[str] = True def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: int=False ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ ) if return_labels: if model_class in get_values(lowerCamelCase_ ): UpperCAmelCase_ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ ) return inputs_dict def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : List[str] = MobileBertModelTester(self ) UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 ) def A__ ( self: Optional[Any] ) -> List[Any]: self.config_tester.run_common_tests() def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ ) def A__ ( self: Optional[int] ) -> Optional[int]: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Tuple: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ ) def A__ ( self: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ ) def A__ ( self: Optional[int] ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ ) def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( _a : Union[str, Any] ): '''simple docstring''' return torch.tensor( _a , dtype=torch.long , device=_a , ) UpperCamelCase_ = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _snake_case ( unittest.TestCase ): '''simple docstring''' @slow def A__ ( self: List[Any] ) -> str: UpperCAmelCase_ : Any = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : str = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )[0] UpperCAmelCase_ : Union[str, Any] = torch.Size((1, 9, 512) ) self.assertEqual(output.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Tuple = 
torch.tensor( [ [ [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05], [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00], [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01], ] ] ,device=lowerCamelCase_ ,) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
345
1
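# A sketch of the ratio-based tolerance used in the integration test above:
# MobileBERT activations span roughly 1e0 to 1e8, so an absolute tolerance is
# meaningless; the test instead checks that expected / actual stays within
# 1 +/- TOLERANCE. The tensors below are fabricated to illustrate the check.
import torch

TOLERANCE = 1e-3


def close_by_ratio(expected: torch.Tensor, actual: torch.Tensor, tol: float = TOLERANCE) -> bool:
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol)) and bool(torch.all(ratio <= 1 + tol))


actual = torch.tensor([1.0e8, -5.0e-1, 2.6])
expected = actual * (1 + 5e-4)  # within tolerance despite a ~5e4 absolute gap on the first entry
assert close_by_ratio(expected, actual)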
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase_ = logging.get_logger(__name__) def lowerCamelCase_ ( _a : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : str = r"""\w+[.]\d+""" UpperCAmelCase_ : List[Any] = re.findall(_a , _a ) for pat in pats: UpperCAmelCase_ : Any = key.replace(_a , """_""".join(pat.split(""".""" ) ) ) return key def lowerCamelCase_ ( _a : List[str] , _a : Optional[Any] , _a : int ): '''simple docstring''' UpperCAmelCase_ : str = pt_tuple_key[:-1] + ("""scale""",) if ( any("""norm""" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): UpperCAmelCase_ : Tuple = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: UpperCAmelCase_ : Union[str, Any] = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: UpperCAmelCase_ : Optional[Any] = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer UpperCAmelCase_ : str = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: UpperCAmelCase_ : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer UpperCAmelCase_ : Union[str, Any] = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": UpperCAmelCase_ : Optional[Any] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight UpperCAmelCase_ : str = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias UpperCAmelCase_ : Any = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowerCamelCase_ ( _a : Optional[int] , _a : Union[str, Any] , _a : List[str]=42 ): '''simple docstring''' UpperCAmelCase_ : Any = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params UpperCAmelCase_ : str = flax_model.init_weights(PRNGKey(_a ) ) UpperCAmelCase_ : List[Any] = flatten_dict(_a ) UpperCAmelCase_ : Tuple = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): UpperCAmelCase_ : int = rename_key(_a ) UpperCAmelCase_ : Any = tuple(renamed_pt_key.split(""".""" ) ) # Correctly rename weight parameters UpperCAmelCase_ , UpperCAmelCase_ : List[str] = rename_key_and_reshape_tensor(_a , _a , _a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown UpperCAmelCase_ : Tuple = jnp.asarray(_a ) return unflatten_dict(_a )
345
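# A hedged sketch of two conversion rules from the PyTorch-to-Flax mapping
# above: dotted layer indices such as "layers.0" become "layers_0", and
# linear weights are transposed because a Flax Dense kernel is laid out as
# (in_features, out_features) while a torch.nn.Linear weight is
# (out_features, in_features). Shapes are illustrative.
import re

import numpy as np


def rename_key(key: str) -> str:
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


assert rename_key("encoder.layers.0.weight") == "encoder.layers_0.weight"

pt_linear_weight = np.zeros((16, 8))   # torch layout: (out_features, in_features)
flax_kernel = pt_linear_weight.T       # Flax layout: (in_features, out_features)
assert flax_kernel.shape == (8, 16)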
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: str ) -> int: UpperCAmelCase_ : List[Any] = """ylacombe/bark-small""" UpperCAmelCase_ : Tuple = tempfile.mkdtemp() UpperCAmelCase_ : Union[str, Any] = """en_speaker_1""" UpperCAmelCase_ : Optional[Any] = """This is a test string""" UpperCAmelCase_ : int = """speaker_embeddings_path.json""" UpperCAmelCase_ : Any = """speaker_embeddings""" def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]: return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ ) def A__ ( self: str ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def A__ ( self: List[Any] ) -> int: UpperCAmelCase_ : int = self.get_tokenizer() UpperCAmelCase_ : Tuple = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) @slow def A__ ( self: List[Any] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) processor.save_pretrained( self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,) UpperCAmelCase_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained( self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) UpperCAmelCase_ : Optional[int] = 35 UpperCAmelCase_ : Optional[int] = 2 UpperCAmelCase_ : Dict = 8 UpperCAmelCase_ : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset UpperCAmelCase_ : str = processor(text=self.input_string ,voice_preset=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() ) # test loading voice preset from npz file UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,"""file.npz""" ) np.savez(lowerCamelCase_ ,**lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=lowerCamelCase_ ) UpperCAmelCase_ : int = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() ) # test loading voice preset from the hub UpperCAmelCase_ : Union[str, Any] = processor(text=self.input_string ,voice_preset=self.voice_preset ) def A__ ( self: Dict ) -> Tuple: UpperCAmelCase_ : Any = self.get_tokenizer() UpperCAmelCase_ : Dict = 
BarkProcessor(tokenizer=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ) UpperCAmelCase_ : str = tokenizer( self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
345
1
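# A minimal sketch of the .npz voice-preset round trip exercised above: a
# preset is just a dict of named numpy arrays, saved with np.savez and
# reloaded key by key. The shapes (semantic / coarse / fine prompts) follow
# the test; the file name is arbitrary.
import os
import tempfile

import numpy as np

voice_preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "preset.npz")
    np.savez(path, **voice_preset)
    loaded = np.load(path)
    for key, value in voice_preset.items():
        assert np.array_equal(loaded[key], value)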
import argparse
import json
import os
import re

import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging


logging.set_verbosity_info()

WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def layer_name_mapping(key, file):
    """Map a Megatron-DeepSpeed weight name to the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
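# Illustrative sketch (not part of the conversion script above): how a single
# tensor is merged across tensor-parallel (TP) shards. Replicated layer-norm
# style weights are averaged; sharded linear weights are concatenated, along
# dim 1 for row-parallel layers and dim 0 otherwise. The key and shard values
# below are made up; the suffix lists are abridged copies of the script's.
import torch

WEIGHTS_TO_AVERAGE_ENDSWITH = ["input_layernorm.weight", "input_layernorm.bias", "ln_f.weight", "ln_f.bias"]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = ["mlp.dense_4h_to_h.weight", "self_attention.dense.weight"]


def merge_tp_tensor(shards, key, pretraining_tp):
    if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
        return sum(shard[key] for shard in shards) / pretraining_tp  # replicated -> average
    dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
    return torch.cat([shard[key] for shard in shards], dim=dim)  # sharded -> concatenate


shards = [{"h.0.self_attention.dense.weight": torch.ones(4, 3)} for _ in range(2)]
merged = merge_tp_tensor(shards, "h.0.self_attention.dense.weight", pretraining_tp=2)
assert merged.shape == (4, 6)  # row-parallel weights grow along dim 1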
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : str = -1 UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : Optional[int] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: Dict ) -> Optional[Any]: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] ) UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() UpperCAmelCase_ : int = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[Any] ) -> Dict: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :] UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : List[str] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: str ) -> str: # Tests that we can pass 
`decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Any = -1 UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n" UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = -1 UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 ) UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(lowerCamelCase_ ): UpperCAmelCase_ : Union[str, Any] = """""" for new_text in streamer: streamer_text += new_text
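# Minimal usage sketch of TextIteratorStreamer, mirroring the threaded pattern
# exercised by the tests above (the tiny test checkpoint is reused here).
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer)
generation_kwargs = dict(inputs, max_new_tokens=10, do_sample=False, streamer=streamer)
Thread(target=model.generate, kwargs=generation_kwargs).start()

# Tokens become available on the iterator as generation proceeds.
generated_text = "".join(streamer)
print(generated_text)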
def fibonacci(n: int) -> int:
    """Compute the n-th Fibonacci number iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
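# Quick sanity check (illustrative): 144 = F(12) is the first Fibonacci number
# with three digits, so the index returned for n=3 should be 12.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12
assert solution(3) == 12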
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vq_model = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vq_model, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
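# Minimal usage sketch of the unconditional latent diffusion pipeline covered
# by the tests above; the checkpoint name comes from the integration test and
# the step count is kept small for illustration.
import torch

from diffusers import LDMPipeline

ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
generator = torch.manual_seed(0)
image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images[0]
print(image.shape)  # (256, 256, 3)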
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
def topological_sort(graph):
    """Print a topological ordering of the graph using Kahn's algorithm, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
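# The same routine also detects cycles: with 1 -> 2 -> 3 -> 1 below, the queue
# dries up after vertex 0 is processed, so "Cycle exists" is printed.
cyclic_graph = {0: [1], 1: [2], 2: [3], 3: [1]}
topological_sort(cyclic_graph)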
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator that makes the wrapped function return its wall-clock duration."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Generate `num_examples` dummy examples matching the given feature spec."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write dummy examples to an Arrow file and load them back as a Dataset."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
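# Hypothetical usage sketch for the helpers above; the feature spec, path and
# sequence shape are made up for illustration.
features = datasets.Features(
    {"text": datasets.Value("string"), "scores": datasets.Sequence(datasets.Value("float32"))}
)
dataset = generate_example_dataset(
    "/tmp/dummy_dataset.arrow", features, num_examples=10, seq_shapes={"scores": (8,)}
)
print(dataset)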
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
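# Quick check of the derived channel dimension (illustrative; run from an
# environment with `transformers` installed, since this module uses relative
# imports):
#
#     from transformers import Swinv2Config
#
#     config = Swinv2Config()  # embed_dim=96, four stages
#     assert config.hidden_size == 96 * 2 ** 3  # 768, per the formula above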
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
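# Minimal usage sketch of `patch_submodule` (the patched module and the
# replacement below are hypothetical):
#
#     import some_module  # any module whose globals include `os` or `os.path.join`
#
#     def fake_join(*parts):
#         return "::".join(parts)
#
#     with patch_submodule(some_module, "os.path.join", fake_join):
#         ...  # code in some_module now sees the fake join; originals are restored on exit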
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: int ) -> str: # A mock response for an HTTP head request to emulate server down UpperCAmelCase_ : List[str] = mock.Mock() UpperCAmelCase_ : List[Any] = 500 UpperCAmelCase_ : Union[str, Any] = {} UpperCAmelCase_ : Union[str, Any] = HTTPError UpperCAmelCase_ : Any = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head: UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def A__ ( self: str ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase_ : str = mock.Mock() UpperCAmelCase_ : Optional[int] = 500 UpperCAmelCase_ : int = {} UpperCAmelCase_ : Union[str, Any] = HTTPError UpperCAmelCase_ : List[Any] = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head: UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def A__ ( self: str ) -> Dict: # This test is for deprecated behavior and can be removed in v5 try: UpperCAmelCase_ : Any = tempfile.mktemp() with open(lowerCamelCase_ ,"""wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ ) UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ ) finally: os.remove(lowerCamelCase_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. 
return try: with open("""tokenizer.json""" ,"""wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ ) UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("""tokenizer.json""" ) def A__ ( self: List[str] ) -> Tuple: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class _snake_case ( unittest.TestCase ): '''simple docstring''' A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def A__ ( cls: Dict ) -> Optional[int]: UpperCAmelCase_ : List[str] = TOKEN HfFolder.save_token(lowerCamelCase_ ) @classmethod def A__ ( cls: Optional[Any] ) -> List[str]: try: delete_repo(token=cls._token ,repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def A__ ( self: Any ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ ) tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def A__ ( self: Optional[int] ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token ) UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) 
@require_tokenizers def A__ ( self: Optional[int] ) -> Optional[Any]: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ ) bert_tokenizer.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" ) class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Any = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def A__ ( self: Tuple ) -> Optional[int]: UpperCAmelCase_ : str = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Dict = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] ) def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[str] = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] ) def A__ ( self: int ) -> 
Union[str, Any]: UpperCAmelCase_ : List[str] = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] ) def A__ ( self: int ) -> List[str]: UpperCAmelCase_ : int = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] ) def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] ) def A__ ( self: List[Any] ) -> Any: # Even if the offsets are wrong, we necessarily output correct string # parts. UpperCAmelCase_ : Tuple = Trie() UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
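# Minimal usage sketch of the tokenizer Trie exercised above: added tokens are
# matched greedily when splitting raw text.
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
print(trie.split("[CLS] This is a extra_id_100"))
# -> ['[CLS]', ' This is a ', 'extra_id_100']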
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class _snake_case : '''simple docstring''' def __init__( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str]=13 ,lowerCamelCase_: Any=7 ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Tuple=False ,lowerCamelCase_: int=True ,lowerCamelCase_: Optional[Any]=99 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: Optional[int]=5 ,lowerCamelCase_: Dict=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Any="gelu" ,lowerCamelCase_: Optional[Any]=0.1 ,lowerCamelCase_: Dict=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Tuple=16 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: int=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: int=None ,) -> Any: UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : Any = batch_size UpperCAmelCase_ : Union[str, Any] = seq_length UpperCAmelCase_ : Tuple = is_training UpperCAmelCase_ : Optional[Any] = use_input_mask UpperCAmelCase_ : Optional[int] = use_token_type_ids UpperCAmelCase_ : Tuple = use_labels UpperCAmelCase_ : str = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : int = num_hidden_layers UpperCAmelCase_ : Optional[Any] = num_attention_heads UpperCAmelCase_ : Union[str, Any] = intermediate_size UpperCAmelCase_ : List[Any] = hidden_act UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Any = attention_probs_dropout_prob UpperCAmelCase_ : str = max_position_embeddings UpperCAmelCase_ : int = type_vocab_size UpperCAmelCase_ : Any = type_sequence_label_size UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : Tuple = num_labels UpperCAmelCase_ : int = num_choices UpperCAmelCase_ : Optional[Any] = scope def A__ ( self: Optional[int] ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : Optional[Any] = None if self.use_input_mask: UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) UpperCAmelCase_ : Any = None UpperCAmelCase_ : Optional[int] = None UpperCAmelCase_ : str = None if self.use_labels: UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase_ : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ ( self: int ) -> List[Any]: return BioGptConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers 
,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,) def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Any ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = BioGptModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int ,) -> Optional[Any]: UpperCAmelCase_ : List[str] = BioGptForCausalLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Any = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,*lowerCamelCase_: str ) -> Tuple: UpperCAmelCase_ : List[Any] = BioGptModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() # create attention mask UpperCAmelCase_ : Dict = torch.ones(input_ids.shape ,dtype=torch.long ,device=lowerCamelCase_ ) UpperCAmelCase_ : str = self.seq_length // 2 UpperCAmelCase_ : Any = 0 # first forward pass UpperCAmelCase_ , UpperCAmelCase_ : Any = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Union[str, Any] = ids_tensor((self.batch_size, 1) ,config.vocab_size ) # change a random masked slice from input_ids UpperCAmelCase_ : Optional[Any] = ids_tensor((1,) ,lowerCamelCase_ ).item() + 1 UpperCAmelCase_ : List[Any] = ids_tensor((self.batch_size, 1) ,config.vocab_size ).squeeze(-1 ) UpperCAmelCase_ : int = random_other_next_tokens # append to next input_ids and attn_mask UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) UpperCAmelCase_ : Optional[int] = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) ,dtype=torch.long ,device=lowerCamelCase_ )] ,dim=1 ,) # get two different outputs UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ )["""last_hidden_state"""] UpperCAmelCase_ : List[str] = model(lowerCamelCase_ ,past_key_values=lowerCamelCase_ ,attention_mask=lowerCamelCase_ )["""last_hidden_state"""] # select random slice UpperCAmelCase_ : Optional[Any] = ids_tensor((1,) ,output_from_past.shape[-1] ).item() UpperCAmelCase_ : Dict = output_from_no_past[:, -1, random_slice_idx].detach() UpperCAmelCase_ : str = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase_ ,lowerCamelCase_ 
,atol=1e-3 ) ) def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,*lowerCamelCase_: Optional[int] ) -> Dict: UpperCAmelCase_ : Dict = BioGptModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).eval() UpperCAmelCase_ : List[Any] = torch.ones(input_ids.shape ,dtype=torch.long ,device=lowerCamelCase_ ) # first forward pass UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,use_cache=lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) ,config.vocab_size ) UpperCAmelCase_ : List[str] = ids_tensor((self.batch_size, 3) ,2 ) # append to next input_ids and UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 ) UpperCAmelCase_ : Dict = torch.cat([attention_mask, next_attn_mask] ,dim=-1 ) UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ )["""last_hidden_state"""] UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,past_key_values=lowerCamelCase_ )[ """last_hidden_state""" ] # select random slice UpperCAmelCase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() UpperCAmelCase_ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 ) ) def A__ ( self: int ,lowerCamelCase_: List[str] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,*lowerCamelCase_: Optional[int] ,lowerCamelCase_: Union[str, Any]=False ) -> Tuple: UpperCAmelCase_ : Tuple = BioGptForCausalLM(lowerCamelCase_ ) model.to(lowerCamelCase_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ,*lowerCamelCase_: Optional[Any] ) -> Any: UpperCAmelCase_ : Union[str, Any] = BioGptModel(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) ,0.0_0_1 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) ,0.0_1 ) def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: Dict ,lowerCamelCase_: Union[str, Any] ,*lowerCamelCase_: int ) -> List[Any]: UpperCAmelCase_ : Dict = self.num_labels UpperCAmelCase_ : Any = BioGptForTokenClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def A__ ( self: Any ) -> List[Any]: 
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = config_and_inputs UpperCAmelCase_ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : int = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) A__ : Union[str, Any] = (BioGptForCausalLM,) if is_torch_available() else () A__ : Any = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) A__ : str = False def A__ ( self: Union[str, Any] ) -> Dict: UpperCAmelCase_ : Optional[Any] = BioGptModelTester(self ) UpperCAmelCase_ : Dict = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 ) def A__ ( self: Tuple ) -> Optional[int]: self.config_tester.run_common_tests() def A__ ( self: Dict ) -> int: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def A__ ( self: Any ) -> int: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : int = type self.model_tester.create_and_check_model(*lowerCamelCase_ ) def A__ ( self: int ) -> List[Any]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCamelCase_ ) def A__ ( self: Any ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*lowerCamelCase_ ,gradient_checkpointing=lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Tuple: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCamelCase_ ) def A__ ( self: Any ) -> str: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCamelCase_ ) @slow def A__ ( self: int ) -> Optional[Any]: UpperCAmelCase_ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCAmelCase_ : Union[str, Any] = """left""" # Define PAD Token = EOS Token = 50256 UpperCAmelCase_ : Union[str, Any] = tokenizer.eos_token UpperCAmelCase_ : Optional[int] = model.config.eos_token_id # use different length sentences to test batching UpperCAmelCase_ : str = [ """Hello, my dog is a little""", """Today, I""", ] UpperCAmelCase_ : str = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" ,padding=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = inputs["""input_ids"""].to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model.generate( input_ids=lowerCamelCase_ 
,attention_mask=inputs["""attention_mask"""].to(lowerCamelCase_ ) ,) UpperCAmelCase_ : List[str] = tokenizer(sentences[0] ,return_tensors="""pt""" ).input_ids.to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = model.generate(input_ids=lowerCamelCase_ ) UpperCAmelCase_ : Dict = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() UpperCAmelCase_ : Optional[int] = tokenizer(sentences[1] ,return_tensors="""pt""" ).input_ids.to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = model.generate(input_ids=lowerCamelCase_ ,max_length=model.config.max_length - num_paddings ) UpperCAmelCase_ : Any = tokenizer.batch_decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ) UpperCAmelCase_ : int = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=lowerCamelCase_ ) UpperCAmelCase_ : int = tokenizer.decode(output_padded[0] ,skip_special_tokens=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = [ """Hello, my dog is a little bit bigger than a little bit.""", """Today, I have a good idea of how to use the information""", ] self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ ,[non_padded_sentence, padded_sentence] ) @slow def A__ ( self: Tuple ) -> List[str]: for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[str] = BioGptModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__ ( self: List[Any] ) -> Tuple: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Dict = 3 UpperCAmelCase_ : Tuple = input_dict["""input_ids"""] UpperCAmelCase_ : Optional[Any] = input_ids.ne(1 ).to(lowerCamelCase_ ) UpperCAmelCase_ : str = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : str = BioGptForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def A__ ( self: str ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = 3 UpperCAmelCase_ : Tuple = """multi_label_classification""" UpperCAmelCase_ : Tuple = input_dict["""input_ids"""] UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @slow def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : int = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) UpperCAmelCase_ : int = torch.tensor([[2, 4805, 9, 656, 21]] ) UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ )[0] UpperCAmelCase_ : Union[str, Any] = 42384 UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Any = torch.tensor( [[[-9.5_2_3_6, 
-9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,lowerCamelCase_ ,atol=1e-4 ) ) @slow def A__ ( self: List[Any] ) -> Optional[int]: UpperCAmelCase_ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCAmelCase_ : List[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(lowerCamelCase_ ) torch.manual_seed(0 ) UpperCAmelCase_ : Any = tokenizer("""COVID-19 is""" ,return_tensors="""pt""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = model.generate( **lowerCamelCase_ ,min_length=100 ,max_length=1024 ,num_beams=5 ,early_stopping=lowerCamelCase_ ,) UpperCAmelCase_ : List[Any] = tokenizer.decode(output_ids[0] ,skip_special_tokens=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
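# Minimal generation sketch with BioGPT, mirroring the slow integration test
# above (the length budget is shortened here for illustration).
import torch

from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")
torch.manual_seed(0)
output_ids = model.generate(**inputs, min_length=20, max_length=60, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))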
from ..utils import DummyObject, requires_backends class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Any = ["flax"] def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Dict = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : List[str] = ["flax"] def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : int = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[int] = ["flax"] def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : List[Any] = ["flax"] def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict: requires_backends(cls ,["""flax"""] ) 
@classmethod def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : str = ["flax"] def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Union[str, Any] = ["flax"] def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[Any] = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[int] = ["flax"] def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]: requires_backends(cls ,["""flax"""] )
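# Illustrative sketch (hedged, names assumed): the classes above are auto-generated
# placeholders for optional-dependency objects; requires_backends raises a helpful
# ImportError when "flax" is missing. A minimal hand-written version of the same pattern:
class _RequiresFlaxMeta(type):
    def __call__(cls, *args, **kwargs):
        # Raised at instantiation time, so importing the placeholder stays cheap and safe.
        raise ImportError(
            f"{cls.__name__} requires the flax backend; install it with `pip install flax`."
        )


class FlaxPlaceholder(metaclass=_RequiresFlaxMeta):
    """Stands in for a Flax model class when flax is not installed."""


# FlaxPlaceholder()  # would raise ImportError with an actionable message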
345
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class _snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=7 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: Tuple=30 ,lowerCamelCase_: Optional[int]=400 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Optional[Any]=None ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Dict=[0.5, 0.5, 0.5] ,lowerCamelCase_: int=[0.5, 0.5, 0.5] ,lowerCamelCase_: str=True ,lowerCamelCase_: List[str]=1 / 255 ,lowerCamelCase_: Optional[Any]=True ,) -> Dict: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p UpperCAmelCase_ : List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} UpperCAmelCase_ : Optional[Any] = parent UpperCAmelCase_ : Optional[Any] = batch_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : List[str] = min_resolution UpperCAmelCase_ : Dict = max_resolution UpperCAmelCase_ : Any = do_resize UpperCAmelCase_ : Union[str, Any] = size UpperCAmelCase_ : List[Any] = do_normalize UpperCAmelCase_ : int = image_mean UpperCAmelCase_ : str = image_std UpperCAmelCase_ : List[str] = do_rescale UpperCAmelCase_ : Dict = rescale_factor UpperCAmelCase_ : int = do_pad def A__ ( self: Union[str, Any] ) -> Dict: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def A__ ( self: Optional[int] ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any]=False ) -> int: if not batched: UpperCAmelCase_ : Union[str, Any] = image_inputs[0] if isinstance(lowerCamelCase_ ,Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ : List[str] = image.size else: UpperCAmelCase_ , UpperCAmelCase_ : Any = image.shape[1], image.shape[2] if w < h: UpperCAmelCase_ : List[Any] = int(self.size["""shortest_edge"""] * h / w ) UpperCAmelCase_ : List[str] = self.size["""shortest_edge"""] elif w > h: UpperCAmelCase_ : Any = self.size["""shortest_edge"""] UpperCAmelCase_ : Dict = int(self.size["""shortest_edge"""] * w / h ) else: UpperCAmelCase_ : Dict = self.size["""shortest_edge"""] UpperCAmelCase_ : Dict = self.size["""shortest_edge"""] else: UpperCAmelCase_ : List[Any] = [] for image in image_inputs: UpperCAmelCase_ , UpperCAmelCase_ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase_ : Union[str, Any] = max(lowerCamelCase_ ,key=lambda lowerCamelCase_ : item[0] )[0] UpperCAmelCase_ : Optional[int] = max(lowerCamelCase_ ,key=lambda lowerCamelCase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _snake_case ( __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[str] = ConditionalDetrImageProcessor if is_vision_available() else None def A__ ( self: Dict ) -> str: UpperCAmelCase_ : int = ConditionalDetrImageProcessingTester(self ) @property def A__ ( self: int ) -> Optional[int]: return 
self.image_processor_tester.prepare_image_processor_dict() def A__ ( self: Optional[Any] ) -> str: UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase_ ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ ,"""size""" ) ) def A__ ( self: int ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict( self.image_processor_dict ,size=42 ,max_size=84 ,pad_and_return_pixel_mask=lowerCamelCase_ ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad ,lowerCamelCase_ ) def A__ ( self: List[str] ) -> int: pass def A__ ( self: str ) -> Any: # Initialize image_processing UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ ,Image.Image ) # Test not batched input UpperCAmelCase_ : Any = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase_ ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processor_tester.get_expected_values(lowerCamelCase_ ,batched=lowerCamelCase_ ) UpperCAmelCase_ : Any = image_processing(lowerCamelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def A__ ( self: Dict ) -> List[str]: # Initialize image_processing UpperCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase_ ,numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ ,np.ndarray ) # Test not batched input UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase_ ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched UpperCAmelCase_ : Tuple = image_processing(lowerCamelCase_ ,return_tensors="""pt""" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processor_tester.get_expected_values(lowerCamelCase_ ,batched=lowerCamelCase_ ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def A__ ( self: Any ) -> Dict: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase_ ,torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ ,torch.Tensor ) # Test not batched input UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCamelCase_ ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched UpperCAmelCase_ : str = image_processing(lowerCamelCase_ ,return_tensors="""pt""" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCamelCase_ ,batched=lowerCamelCase_ ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def A__ ( self: Tuple ) -> int: # prepare image and target UpperCAmelCase_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" ,"""r""" ) as f: UpperCAmelCase_ : List[str] = json.loads(f.read() ) UpperCAmelCase_ : Optional[Any] = {"""image_id""": 39769, """annotations""": target} # encode them UpperCAmelCase_ : Any = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" ) UpperCAmelCase_ : List[str] = image_processing(images=lowerCamelCase_ ,annotations=lowerCamelCase_ ,return_tensors="""pt""" ) # verify pixel values UpperCAmelCase_ : Any = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape ,lowerCamelCase_ ) UpperCAmelCase_ : int = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,lowerCamelCase_ ,atol=1e-4 ) ) # verify area UpperCAmelCase_ : int = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,lowerCamelCase_ ) ) # verify boxes UpperCAmelCase_ : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,lowerCamelCase_ ) UpperCAmelCase_ : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,lowerCamelCase_ ,atol=1e-3 ) ) # verify image_id UpperCAmelCase_ : str = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,lowerCamelCase_ ) ) # verify is_crowd UpperCAmelCase_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,lowerCamelCase_ ) ) # verify class_labels UpperCAmelCase_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,lowerCamelCase_ ) ) # verify orig_size UpperCAmelCase_ : Union[str, Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,lowerCamelCase_ ) ) # verify size UpperCAmelCase_ : Dict = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,lowerCamelCase_ ) ) @slow def A__ ( self: Optional[int] ) -> 
Dict: # prepare image, target and masks_path UpperCAmelCase_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" ,"""r""" ) as f: UpperCAmelCase_ : Union[str, Any] = json.loads(f.read() ) UpperCAmelCase_ : Tuple = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target} UpperCAmelCase_ : Union[str, Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them UpperCAmelCase_ : int = ConditionalDetrImageProcessor(format="""coco_panoptic""" ) UpperCAmelCase_ : Optional[int] = image_processing(images=lowerCamelCase_ ,annotations=lowerCamelCase_ ,masks_path=lowerCamelCase_ ,return_tensors="""pt""" ) # verify pixel values UpperCAmelCase_ : Optional[int] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,lowerCamelCase_ ,atol=1e-4 ) ) # verify area UpperCAmelCase_ : Optional[int] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,lowerCamelCase_ ) ) # verify boxes UpperCAmelCase_ : str = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,lowerCamelCase_ ,atol=1e-3 ) ) # verify image_id UpperCAmelCase_ : List[str] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,lowerCamelCase_ ) ) # verify is_crowd UpperCAmelCase_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,lowerCamelCase_ ) ) # verify class_labels UpperCAmelCase_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,lowerCamelCase_ ) ) # verify masks UpperCAmelCase_ : Tuple = 822873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() ,lowerCamelCase_ ) # verify orig_size UpperCAmelCase_ : List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,lowerCamelCase_ ) ) # verify size UpperCAmelCase_ : Optional[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,lowerCamelCase_ ) )
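# Illustrative sketch (hedged, helper name assumed): the `boxes` values asserted above
# are in the DETR-style normalized (center_x, center_y, width, height) format, each in
# [0, 1]. A converter from absolute corner coordinates would look roughly like this:
def corners_to_center_format(x0: float, y0: float, x1: float, y1: float, img_w: int, img_h: int):
    center_x = (x0 + x1) / 2 / img_w
    center_y = (y0 + y1) / 2 / img_h
    width = (x1 - x0) / img_w
    height = (y1 - y0) / img_h
    return center_x, center_y, width, height


# a box spanning x 0..64, y 0..106 in a 640x480 image stays well inside [0, 1]
assert all(0.0 <= v <= 1.0 for v in corners_to_center_format(0, 0, 64, 106, 640, 480))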
345
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    # Walk from the last index down to 1, swapping each element with a uniformly
    # chosen element at or before it -- the classic, unbiased Fisher-Yates shuffle.
    # (The previous body swapped random pairs of undefined names, which is neither
    # runnable nor the Fisher-Yates algorithm the __main__ block advertises.)
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
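# Usage note (hedged sketch, reusing fisher_yates_shuffle from the module above): a
# quick empirical check that the shuffle is unbiased -- over many trials each
# permutation of a 3-element list should occur roughly 1/6 of the time.
from collections import Counter

counts = Counter(tuple(fisher_yates_shuffle([1, 2, 3])) for _ in range(60_000))
assert len(counts) == 6                                        # all 3! permutations appear
assert all(abs(c - 10_000) < 1_000 for c in counts.values())   # roughly uniform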
345
1
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase_ = ''' Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)["depth"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline("depth-estimation") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to("cuda") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> img = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/cat.png" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") >>> prompt = "A robot, 4k photo" >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" >>> generator = torch.Generator(device="cuda").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save("robot_cat.png") ``` ''' def lowerCamelCase_ ( _a : Optional[Any] , _a : Any , _a : Dict=8 ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : Tuple = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _snake_case ( __snake_case ): '''simple docstring''' def __init__( self: Any ,lowerCamelCase_: UNetaDConditionModel ,lowerCamelCase_: DDPMScheduler ,lowerCamelCase_: VQModel ,) -> Dict: super().__init__() self.register_modules( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,movq=lowerCamelCase_ ,) UpperCAmelCase_ : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def A__ ( self: Tuple ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]: if latents is None: UpperCAmelCase_ : Union[str, Any] = randn_tensor(lowerCamelCase_ ,generator=lowerCamelCase_ ,device=lowerCamelCase_ ,dtype=lowerCamelCase_ ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) UpperCAmelCase_ : Optional[int] = latents.to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = latents * scheduler.init_noise_sigma return latents def A__ ( self: Optional[Any] ,lowerCamelCase_: int=0 ) -> List[str]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) UpperCAmelCase_ : Union[str, Any] = torch.device(F'''cuda:{gpu_id}''' ) UpperCAmelCase_ : Union[str, Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[Any]=0 ) -> List[Any]: if is_accelerate_available() and is_accelerate_version(""">=""" ,"""0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) UpperCAmelCase_ : Optional[int] = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" ,silence_dtype_warnings=lowerCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ : Dict = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : Any = cpu_offload_with_hook(lowerCamelCase_ ,lowerCamelCase_ ,prev_module_hook=lowerCamelCase_ ) # We'll offload the last model manually. 
UpperCAmelCase_ : str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def A__ ( self: str ) -> Union[str, Any]: if not hasattr(self.unet ,"""_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase_ ,"""_hf_hook""" ) and hasattr(module._hf_hook ,"""execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase_ ) def __call__( self: List[Any] ,lowerCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] ,lowerCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] ,lowerCamelCase_: torch.FloatTensor ,lowerCamelCase_: int = 512 ,lowerCamelCase_: int = 512 ,lowerCamelCase_: int = 100 ,lowerCamelCase_: float = 4.0 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase_: Optional[torch.FloatTensor] = None ,lowerCamelCase_: Optional[str] = "pil" ,lowerCamelCase_: bool = True ,) -> Optional[Any]: UpperCAmelCase_ : Optional[Any] = self._execution_device UpperCAmelCase_ : str = guidance_scale > 1.0 if isinstance(lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : Optional[int] = torch.cat(lowerCamelCase_ ,dim=0 ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : List[str] = torch.cat(lowerCamelCase_ ,dim=0 ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : List[Any] = torch.cat(lowerCamelCase_ ,dim=0 ) UpperCAmelCase_ : int = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: UpperCAmelCase_ : Union[str, Any] = image_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 ) UpperCAmelCase_ : Dict = negative_image_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 ) UpperCAmelCase_ : str = hint.repeat_interleave(lowerCamelCase_ ,dim=0 ) UpperCAmelCase_ : Any = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = torch.cat([hint, hint] ,dim=0 ).to(dtype=self.unet.dtype ,device=lowerCamelCase_ ) self.scheduler.set_timesteps(lowerCamelCase_ ,device=lowerCamelCase_ ) UpperCAmelCase_ : int = self.scheduler.timesteps UpperCAmelCase_ : str = self.movq.config.latent_channels UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = downscale_height_and_width(lowerCamelCase_ ,lowerCamelCase_ ,self.movq_scale_factor ) # create initial latent UpperCAmelCase_ : List[str] = self.prepare_latents( (batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,self.scheduler ,) for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Tuple = {"""image_embeds""": image_embeds, """hint""": hint} UpperCAmelCase_ : Tuple = self.unet( sample=lowerCamelCase_ ,timestep=lowerCamelCase_ ,encoder_hidden_states=lowerCamelCase_ ,added_cond_kwargs=lowerCamelCase_ ,return_dict=lowerCamelCase_ ,)[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = noise_pred.split(latents.shape[1] ,dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = variance_pred.chunk(2 ) UpperCAmelCase_ : Tuple = noise_pred_uncond + guidance_scale 
* (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ : Dict = torch.cat([noise_pred, variance_pred_text] ,dim=1 ) if not ( hasattr(self.scheduler.config ,"""variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] ,dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : Dict = self.scheduler.step( lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ,)[0] # post-processing UpperCAmelCase_ : Dict = self.movq.decode(lowerCamelCase_ ,force_not_quantize=lowerCamelCase_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: UpperCAmelCase_ : List[str] = image * 0.5 + 0.5 UpperCAmelCase_ : Optional[int] = image.clamp(0 ,1 ) UpperCAmelCase_ : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : Union[str, Any] = self.numpy_to_pil(lowerCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase_ )
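# Illustrative sketch (hedged, not from the pipeline file): the guidance step inside the
# denoising loop above is standard classifier-free guidance -- the UNet runs on a doubled
# batch (unconditional + conditional) and the two noise predictions are blended:
import torch

noise_pred_uncond = torch.randn(1, 4, 96, 96)
noise_pred_text = torch.randn(1, 4, 96, 96)
guidance_scale = 4.0
# scale > 1 pushes the prediction further toward the text-conditional direction
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == noise_pred_uncond.shape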
345
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = True A__ : bool = False A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> List[str]: UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[int] = [] for i in range(self.num_layers ): UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD( in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : int = resnets UpperCAmelCase_ : Tuple = attentions if self.add_downsample: UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int: UpperCAmelCase_ : List[Any] = () for resnet, attn in zip(self.resnets ,self.attentions ): UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : bool = True A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> int: UpperCAmelCase_ : List[str] = [] for i in range(self.num_layers ): UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels UpperCAmelCase_ : Dict = FlaxResnetBlockaD( in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = resnets if self.add_downsample: UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any: UpperCAmelCase_ : Union[str, Any] = () for resnet in self.resnets: UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = True A__ : bool = False A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: str ) -> Any: UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : List[str] = [] for i in 
range(self.num_layers ): UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : int = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = resnets UpperCAmelCase_ : Dict = attentions if self.add_upsample: UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]: for resnet, attn in zip(self.resnets ,self.attentions ): # pop res hidden states UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1] UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1] UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) if self.add_upsample: UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ ) return hidden_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : bool = True A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> Dict: UpperCAmelCase_ : Any = [] for i in range(self.num_layers ): UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase_ : Any = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : str = resnets if self.add_upsample: UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]: for resnet in self.resnets: # pop res hidden states UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1] UpperCAmelCase_ : str = res_hidden_states_tuple[:-1] UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) if self.add_upsample: UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ ) return hidden_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> List[str]: # there is always at least one resnet UpperCAmelCase_ : List[Any] = [ FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels 
,dropout_prob=self.dropout ,dtype=self.dtype ,) ] UpperCAmelCase_ : Any = [] for _ in range(self.num_layers ): UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel( in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : Any = FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Dict = resnets UpperCAmelCase_ : Any = attentions def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ ) for attn, resnet in zip(self.attentions ,self.resnets[1:] ): UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) return hidden_states
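# Illustrative sketch (hedged): the up blocks above pop a saved skip tensor and
# concatenate it on the channel axis (axis=-1, since Flax uses NHWC layout) before each
# resnet, which is why each resnet is built with resnet_in_channels + res_skip_channels:
import jax.numpy as jnp

hidden_states = jnp.zeros((1, 32, 32, 64))       # (batch, height, width, channels)
res_hidden_states = jnp.zeros((1, 32, 32, 64))   # matching skip tensor from the down path
merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
assert merged.shape == (1, 32, 32, 128)          # channels doubled by the skip connection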
345
1
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : int=" " ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = text.split(_a ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )] def lowerCamelCase_ ( _a : dict ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(_a ): titles.append(title if title is not None else """""" ) texts.append(_a ) return {"title": titles, "text": texts} def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ): '''simple docstring''' UpperCAmelCase_ : List[str] = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""] UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ): '''simple docstring''' logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way UpperCAmelCase_ : Optional[int] = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc ) # And compute the embeddings UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a ) UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) UpperCAmelCase_ : Any = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space UpperCAmelCase_ : List[str] = dataset.map( partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , ) # And finally save your dataset UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" ) dataset.save_to_disk(_a ) # from datasets import load_from_disk # 
dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=_a ) # And save the index UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(_a ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class _snake_case : '''simple docstring''' A__ : str = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) A__ : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) A__ : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) A__ : Optional[str] = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class _snake_case : '''simple docstring''' A__ : Optional[int] = field( default=__snake_case , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) A__ : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class _snake_case : '''simple docstring''' A__ : int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) A__ : int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
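# Illustrative sketch (hedged): the indexing step above in isolation -- build a FAISS
# HNSW index over d-dimensional embeddings and query nearest neighbours by inner
# product. Dimensions mirror the defaults in IndexHnswArguments (d=768, m=128).
import numpy as np
import faiss

d, m = 768, 128
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
embeddings = np.random.rand(1_000, d).astype("float32")  # stand-in for DPR outputs
index.add(embeddings)
scores, ids = index.search(embeddings[:1], 5)  # top-5 neighbours of the first passage
assert ids.shape == (1, 5)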
345
import pickle import numpy as np from matplotlib import pyplot as plt class _snake_case : '''simple docstring''' def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]: UpperCAmelCase_ : List[Any] = bp_numa UpperCAmelCase_ : str = bp_numa UpperCAmelCase_ : List[Any] = bp_numa UpperCAmelCase_ : Optional[int] = conva_get[:2] UpperCAmelCase_ : List[Any] = conva_get[2] UpperCAmelCase_ : str = size_pa UpperCAmelCase_ : Optional[int] = rate_w UpperCAmelCase_ : Dict = rate_t UpperCAmelCase_ : List[Any] = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1 UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1 UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple: # save model dict with pickle UpperCAmelCase_ : Dict = { """num_bp1""": self.num_bpa, """num_bp2""": self.num_bpa, """num_bp3""": self.num_bpa, """conv1""": self.conva, """step_conv1""": self.step_conva, """size_pooling1""": self.size_poolinga, """rate_weight""": self.rate_weight, """rate_thre""": self.rate_thre, """w_conv1""": self.w_conva, """wkj""": self.wkj, """vji""": self.vji, """thre_conv1""": self.thre_conva, """thre_bp2""": self.thre_bpa, """thre_bp3""": self.thre_bpa, } with open(lowerCamelCase_ ,"""wb""" ) as f: pickle.dump(lowerCamelCase_ ,lowerCamelCase_ ) print(F'''Model saved: {save_path}''' ) @classmethod def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]: # read saved model with open(lowerCamelCase_ ,"""rb""" ) as f: UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301 UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" ) conv_get.append(model_dic.get("""step_conv1""" ) ) UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" ) UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" ) UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" ) UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" ) UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" ) UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" ) # create model instance UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # modify model parameter UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" ) UpperCAmelCase_ : int = model_dic.get("""wkj""" ) UpperCAmelCase_ : int = model_dic.get("""vji""" ) UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" ) UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" ) UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" ) return conv_ins def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple: return 1 / (1 + np.exp(-1 * x )) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]: return round(lowerCamelCase_ ,3 ) def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any: # convolution process UpperCAmelCase_ : Optional[Any] = convs[0] UpperCAmelCase_ : int = 
convs[1] UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0] # get the data slice of original image data, data_focus UpperCAmelCase_ : Dict = [] for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ): for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ): UpperCAmelCase_ : Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(lowerCamelCase_ ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(lowerCamelCase_ ): UpperCAmelCase_ : Optional[int] = [] for i_focus in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : int = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(lowerCamelCase_ ) ) UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape( lowerCamelCase_ ,lowerCamelCase_ ) data_featuremap.append(lowerCamelCase_ ) # expanding the data slice to One dimenssion UpperCAmelCase_ : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) ) UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ ) return focus_list, data_featuremap def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]: # pooling process UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] ) UpperCAmelCase_ : Any = int(size_map / size_pooling ) UpperCAmelCase_ : Optional[int] = [] for i_map in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : Any = featuremaps[i_map] UpperCAmelCase_ : Tuple = [] for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ): for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(lowerCamelCase_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(lowerCamelCase_ ) ) UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ ) featuremap_pooled.append(lowerCamelCase_ ) return featuremap_pooled def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]: # expanding three dimension data to one dimension list UpperCAmelCase_ : List[Any] = [] for i in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : Tuple = np.shape(data[i] ) UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] ) UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0] data_expanded.extend(lowerCamelCase_ ) UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ ) return data_expanded def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]: # expanding matrix to one dimension list UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ ) UpperCAmelCase_ : str = np.shape(lowerCamelCase_ ) UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]: UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Tuple = 0 for i_map in range(lowerCamelCase_ ): UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) ) for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ): for j in range(0 
,lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : Any = pd_pool[ i_pool ] UpperCAmelCase_ : List[str] = i_pool + 1 UpperCAmelCase_ : Optional[Any] = np.multiply( lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(lowerCamelCase_ ) return pd_all def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]: # model traning print("""----------------------Start Training-------------------------""" ) print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) ) print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) ) UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : Any = 10000 while rp < n_repeat and mse >= error_accuracy: UpperCAmelCase_ : List[str] = 0 print(F'''-------------Learning Time {rp}--------------''' ) for p in range(len(lowerCamelCase_ ) ): # print('------------Learning Image: %d--------------'%p) UpperCAmelCase_ : str = np.asmatrix(datas_train[p] ) UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute( lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga ) UpperCAmelCase_ : int = np.shape(lowerCamelCase_ ) UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = data_bp_input UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa UpperCAmelCase_ : int = self.sig(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCAmelCase_ : List[str] = np.multiply( (data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) ) UpperCAmelCase_ : List[Any] = np.multiply( np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) ) UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji ) UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist() UpperCAmelCase_ : str = self._calculate_gradient_from_pool( lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] ) UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCAmelCase_ : str = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCAmelCase_ : int = rp + 1 UpperCAmelCase_ : Any = 
error_count / patterns all_mse.append(lowerCamelCase_ ) def draw_error(): UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(lowerCamelCase_ ,"""+-""" ) plt.plot(lowerCamelCase_ ,"""r--""" ) plt.xlabel("""Learning Times""" ) plt.ylabel("""All_mse""" ) plt.grid(lowerCamelCase_ ,alpha=0.5 ) plt.show() print("""------------------Training Complished---------------------""" ) print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple: # model predict UpperCAmelCase_ : Union[str, Any] = [] print("""-------------------Start Testing-------------------------""" ) print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) ) for p in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : int = np.asmatrix(datas_test[p] ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute( lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga ) UpperCAmelCase_ : str = self._expand(lowerCamelCase_ ) UpperCAmelCase_ : str = data_bp_input UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ ) produce_out.extend(bp_outa.getA().tolist() ) UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out] return np.asarray(lowerCamelCase_ ) def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple: # return the data of image after convoluting process so we can check it out UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute( lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
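# Illustrative sketch (hedged, separate from the class above): the backprop updates rely
# on the sigmoid derivative sig'(x) = sig(x) * (1 - sig(x)), which is why gradients are
# formed as np.multiply(out, 1 - out). A quick numerical confirmation:
import numpy as np


def sig(x):
    return 1 / (1 + np.exp(-x))


x = np.linspace(-4, 4, 9)
analytic = sig(x) * (1 - sig(x))
numeric = (sig(x + 1e-6) - sig(x - 1e-6)) / 2e-6   # central difference
assert np.allclose(analytic, numeric, atol=1e-6)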
345
1
import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal
contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when
reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
            depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
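# A standalone, pure-Python sketch (hypothetical id and text) of the reshaping
# done in `_compute` above: flat reference dicts become the nested SQuAD-style
# layout that the CUAD `evaluate` script expects.
references = [
    {"id": "contract-1__Parties", "answers": {"text": ["The seller:"], "answer_start": [143]}}
]
dataset = [
    {
        "paragraphs": [
            {
                "qas": [
                    {"answers": [{"text": t} for t in ref["answers"]["text"]], "id": ref["id"]}
                    for ref in references
                ]
            }
        ]
    }
]
print(dataset[0]["paragraphs"][0]["qas"][0]["id"])  # contract-1__Parties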
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
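# A minimal sketch of the same toy vocabulary outside the unittest harness:
# write the vocab/merges files to a temporary directory and tokenize with the
# real `CTRLTokenizer` (requires `transformers`); the file names are arbitrary.
import json
import os
import tempfile

from transformers import CTRLTokenizer

toy_vocab = {tok: i for i, tok in enumerate(["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"])}
toy_merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
with tempfile.TemporaryDirectory() as tmp:
    vocab_path = os.path.join(tmp, "vocab.json")
    merges_path = os.path.join(tmp, "merges.txt")
    with open(vocab_path, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(toy_vocab))
    with open(merges_path, "w", encoding="utf-8") as fp:
        fp.write("\n".join(toy_merges))
    toy_tokenizer = CTRLTokenizer(vocab_path, merges_path, unk_token="<unk>")
    print(toy_tokenizer.tokenize("adapt react readapt apt"))
    # ['adapt', 're@@', 'a@@', 'c@@', 't', 're@@', 'adapt', 'apt']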
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
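# A quick sketch of the operators above in isolation; the seed and gene pool
# are arbitrary, and full runs of `basic` can take many generations.
random.seed(1)
first_child, second_child = crossover("AAAAAA", "BBBBBB")
print(first_child, second_child)        # complementary children, split at one random point
print(mutate(first_child, list("AB")))  # at most one gene flipped, with probability 0.4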
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
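# Hedged usage sketch: this configuration class ships in `transformers`
# (v4.27+) as `ErnieMConfig`; the override values below are arbitrary.
from transformers import ErnieMConfig

config = ErnieMConfig(hidden_size=384, num_hidden_layers=6)
print(config.hidden_size, config.max_position_embeddings)  # 384 514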
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging UpperCamelCase_ = '''\ ''' UpperCamelCase_ = ''' Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity ''' UpperCamelCase_ = ''' Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to \'cuda\' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"] >>> results = perplexity.compute(model_id=\'gpt2\', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 78.22 >>> print(round(results["perplexities"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = datasets.load_dataset("wikitext", ... "wikitext-2-raw-v1", ... split="test")["text"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=\'\'] >>> results = perplexity.compute(model_id=\'gpt2\', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 60.35 >>> print(round(results["perplexities"][0], 2)) 81.12 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): '''simple docstring''' def A__ ( self: Tuple ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) ,reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] ,) def A__ ( self: List[Any] ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int = 16 ,lowerCamelCase_: bool = True ,lowerCamelCase_: Any=None ) -> List[str]: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCAmelCase_ : Union[str, Any] = """cuda""" else: UpperCAmelCase_ : Union[str, Any] = """cuda""" if torch.cuda.is_available() else """cpu""" UpperCAmelCase_ : int = AutoModelForCausalLM.from_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = model.to(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCAmelCase_ : Optional[int] = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(lowerCamelCase_ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCAmelCase_ : int = model.config.max_length - 1 else: UpperCAmelCase_ : str = model.config.max_length UpperCAmelCase_ : Tuple = tokenizer( lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=lowerCamelCase_ ,return_tensors="""pt""" ,return_attention_mask=lowerCamelCase_ ,).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = encodings["""input_ids"""] UpperCAmelCase_ : Any = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." 
UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : List[Any] = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 ,len(lowerCamelCase_ ) ,lowerCamelCase_ ) ): UpperCAmelCase_ : Tuple = min(start_index + batch_size ,len(lowerCamelCase_ ) ) UpperCAmelCase_ : List[Any] = encoded_texts[start_index:end_index] UpperCAmelCase_ : int = attn_masks[start_index:end_index] if add_start_token: UpperCAmelCase_ : Union[str, Any] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase_ ) UpperCAmelCase_ : Any = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 ) UpperCAmelCase_ : Optional[int] = torch.cat( [torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase_ ), attn_mask] ,dim=1 ) UpperCAmelCase_ : int = encoded_batch with torch.no_grad(): UpperCAmelCase_ : str = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ).logits UpperCAmelCase_ : Any = out_logits[..., :-1, :].contiguous() UpperCAmelCase_ : int = labels[..., 1:].contiguous() UpperCAmelCase_ : Optional[Any] = attn_mask[..., 1:].contiguous() UpperCAmelCase_ : List[Any] = torch.expa( (loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase_ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase_ )}
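# A standalone sketch of the arithmetic at the heart of the loop above:
# perplexity as the exponential of the mask-averaged per-token cross-entropy.
# The logits are random stand-ins rather than real model outputs (requires `torch`).
import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(2, 5, 100)         # (batch, seq_len, vocab_size)
labels = torch.randint(0, 100, (2, 5))  # (batch, seq_len)
attn_mask = torch.ones_like(labels)     # every token attended
loss_fct = CrossEntropyLoss(reduction="none")
nll = loss_fct(logits.transpose(1, 2), labels)                # per-token NLL, (batch, seq_len)
ppl = torch.exp((nll * attn_mask).sum(1) / attn_mask.sum(1))  # per-example perplexity
print(ppl.tolist())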
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : int=" " ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = text.split(_a ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )] def lowerCamelCase_ ( _a : dict ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(_a ): titles.append(title if title is not None else """""" ) texts.append(_a ) return {"title": titles, "text": texts} def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ): '''simple docstring''' UpperCAmelCase_ : List[str] = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""] UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ): '''simple docstring''' logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way UpperCAmelCase_ : Optional[int] = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc ) # And compute the embeddings UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a ) UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) UpperCAmelCase_ : Any = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space UpperCAmelCase_ : List[str] = dataset.map( partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , ) # And finally save your dataset UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" ) dataset.save_to_disk(_a ) # from datasets import load_from_disk # 
dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=_a ) # And save the index UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(_a ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class _snake_case : '''simple docstring''' A__ : str = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) A__ : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) A__ : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) A__ : Optional[str] = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class _snake_case : '''simple docstring''' A__ : Optional[int] = field( default=__snake_case , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) A__ : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class _snake_case : '''simple docstring''' A__ : int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) A__ : int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
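# Hedged sketch of reloading the artifacts saved by `main` above and running a
# nearest-neighbor lookup; the output directory is an assumption mirroring the
# dataclass defaults, and the query vector is random rather than a real DPR
# question embedding.
import os

import numpy as np
from datasets import load_from_disk

output_dir = "test_run/dummy-kb"  # assumed; mirrors the default output_dir above
kb = load_from_disk(os.path.join(output_dir, "my_knowledge_dataset"))
kb.load_faiss_index("embeddings", os.path.join(output_dir, "my_knowledge_dataset_hnsw_index.faiss"))
query = np.random.randn(768).astype("float32")  # d=768, the default index dimension
scores, examples = kb.get_nearest_examples("embeddings", query, k=2)
print(examples["title"])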
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
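# A minimal sketch of the `tokenize` mapping above on an in-memory dataset,
# with `gpt2` standing in for the project tokenizer (requires `datasets`
# and `transformers`).
from datasets import Dataset
from transformers import AutoTokenizer

toy_tokenizer = AutoTokenizer.from_pretrained("gpt2")
toy_ds = Dataset.from_dict({"content": ["def add(a, b):\n    return a + b\n"]})


def toy_tokenize(example):
    input_ids = toy_tokenizer(example["content"], truncation=False)["input_ids"]
    return {"input_ids": input_ids, "ratio_char_token": len(example["content"]) / len(input_ids)}


print(toy_ds.map(toy_tokenize)[0]["ratio_char_token"])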
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Dict = AutoencoderKL A__ : Optional[int] = "sample" A__ : Tuple = 1E-2 @property def A__ ( self: List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = 4 UpperCAmelCase_ : str = 3 UpperCAmelCase_ : Any = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ ) return {"sample": image} @property def A__ ( self: List[str] ) -> Tuple: return (3, 32, 32) @property def A__ ( self: Optional[Any] ) -> Any: return (3, 32, 32) def A__ ( self: Any ) -> Tuple: UpperCAmelCase_ : List[Any] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } UpperCAmelCase_ : int = self.dummy_input return init_dict, inputs_dict def A__ ( self: Optional[Any] ) -> int: pass def A__ ( self: str ) -> Any: pass @unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" ) def A__ ( self: Union[str, Any] ) -> Dict: # enable deterministic behavior for gradient checkpointing UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ ) model.to(lowerCamelCase_ ) assert not model.is_gradient_checkpointing and model.training UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowerCamelCase_ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() UpperCAmelCase_ : Dict = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) UpperCAmelCase_ : Dict = dict(model.named_parameters() ) UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) ) def A__ ( self: Optional[Any] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 ) model.to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def A__ ( self: Optional[int] ) -> int: UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ) UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ ) model.eval() if torch_device == "mps": UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) else: UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : str = torch.randn( 1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,) UpperCAmelCase_ : int = image.to(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": UpperCAmelCase_ : Tuple = torch.tensor( [ -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03, ] ) elif torch_device == "cpu": UpperCAmelCase_ : List[str] = torch.tensor( [-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] ) else: UpperCAmelCase_ : List[str] = torch.tensor( [-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] ) self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) ) @slow class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]: return F'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_ ) for s in shape] )}.npy''' def A__ ( self: Union[str, Any] ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]: UpperCAmelCase_ : Tuple = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ ) return image def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any: UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None UpperCAmelCase_ : str = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ : int = AutoencoderKL.from_pretrained( lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=lowerCamelCase_ ,revision=lowerCamelCase_ ,) model.to(lowerCamelCase_ ).eval() return model def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]: if torch_device == "mps": return torch.manual_seed(lowerCamelCase_ ) return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple: UpperCAmelCase_ : List[Any] = self.get_sd_vae_model() UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]], [47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]], # fmt: on ] ) 
@require_torch_gpu def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple: UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict: UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model() UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]], [37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]], # fmt: on ] ) @require_torch_gpu def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.get_sd_vae_model() UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu() UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]], [16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]], # fmt: on ] ) @require_torch_gpu def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 ) @parameterized.expand([(13,), 
(16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" ) def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int: UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" ) def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.get_sd_vae_model() UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]], [47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]], # fmt: on ] ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : Dict = self.get_sd_vae_model() UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ ) UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu() UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2 assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
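# Hedged sketch of the encode/sample/decode path the tests above exercise, on a
# tiny randomly initialized `AutoencoderKL` (requires `diffusers` and `torch`;
# no pretrained weights are downloaded).
import torch
from diffusers import AutoencoderKL

tiny_vae = AutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
).eval()
image = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = tiny_vae.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
    reconstruction = tiny_vae.decode(latents).sample
print(latents.shape, reconstruction.shape)  # latents are spatially downsampled; output matches the input shape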
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
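# A quick sketch calling `get_sinusoidal_embeddings` above on a small batch of
# timesteps (requires `jax`); the timestep values are arbitrary.
example_timesteps = jnp.array([0.0, 10.0, 100.0])
example_emb = get_sinusoidal_embeddings(example_timesteps, embedding_dim=32)
print(example_emb.shape)  # (3, 32)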
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
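# Hedged usage sketch: aligning the template above with a hypothetical dataset
# schema whose audio lives in a column named "speech" (uses the `Audio`,
# `Features`, and `Value` objects already imported in this module).
speech_features = Features({"speech": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition(audio_column="speech")
aligned = template.align_with_features(speech_features)
print(aligned.input_schema["audio"].sampling_rate)  # 16000
print(template.column_mapping)  # {'speech': 'audio', 'transcription': 'transcription'}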
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
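# Hedged sketch of what the lazy module above enables: symbols resolve on first
# attribute access through `transformers`. `AutoformerConfig` is the real
# class; the hyperparameter values below are arbitrary.
from transformers import AutoformerConfig

config = AutoformerConfig(prediction_length=24, context_length=48)
print(config.model_type)  # autoformer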
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = "layoutlmv3" def __init__( self: str ,lowerCamelCase_: Any=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Tuple=512 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: int=1 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: Tuple=64 ,lowerCamelCase_: Tuple=256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=224 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Dict=None ,**lowerCamelCase_: str ,) -> List[Any]: super().__init__( vocab_size=lowerCamelCase_ ,hidden_size=lowerCamelCase_ ,num_hidden_layers=lowerCamelCase_ ,num_attention_heads=lowerCamelCase_ ,intermediate_size=lowerCamelCase_ ,hidden_act=lowerCamelCase_ ,hidden_dropout_prob=lowerCamelCase_ ,attention_probs_dropout_prob=lowerCamelCase_ ,max_position_embeddings=lowerCamelCase_ ,type_vocab_size=lowerCamelCase_ ,initializer_range=lowerCamelCase_ ,layer_norm_eps=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,) UpperCAmelCase_ : List[Any] = max_ad_position_embeddings UpperCAmelCase_ : Optional[int] = coordinate_size UpperCAmelCase_ : Optional[int] = shape_size UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias UpperCAmelCase_ : Optional[int] = rel_pos_bins UpperCAmelCase_ : Union[str, Any] = max_rel_pos UpperCAmelCase_ : Dict = has_spatial_attention_bias UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins UpperCAmelCase_ : Tuple = max_rel_ad_pos UpperCAmelCase_ : Union[str, Any] = text_embed UpperCAmelCase_ : Optional[Any] = visual_embed UpperCAmelCase_ : List[str] = input_size UpperCAmelCase_ : str = num_channels UpperCAmelCase_ : Optional[int] = patch_size UpperCAmelCase_ : Tuple = classifier_dropout class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = version.parse("1.12" ) @property def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", 
{0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def A__ ( self: Any ) -> float: return 1e-5 @property def A__ ( self: int ) -> int: return 12 def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]: setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ : List[str] = compute_effective_axis_dimension( lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ ) UpperCAmelCase_ : int = compute_effective_axis_dimension( lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = dict( processor( lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) ) return inputs
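# A small sketch of the axis helper used in `generate_dummy_inputs` above:
# dynamic ONNX axes (-1) fall back to a fixed default, and the number of
# special tokens to be added is subtracted either way (requires `transformers`).
from transformers.onnx.utils import compute_effective_axis_dimension

print(compute_effective_axis_dimension(-1, fixed_dimension=2, num_token_to_add=0))  # 2
print(compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2))  # 6
print(compute_effective_axis_dimension(16, fixed_dimension=8, num_token_to_add=2))  # 14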
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Union[str, Any] ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Optional[Any] = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ ) return image @property def A__ ( self: List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : int = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) return model @property def A__ ( self: str ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) return model @property def A__ ( self: Optional[int] ) -> int: torch.manual_seed(0 ) UpperCAmelCase_ : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModel(lowerCamelCase_ ) @property def A__ ( self: Tuple ) -> Tuple: def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ): class _snake_case : '''simple docstring''' def __init__( self: List[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = torch.ones([0] ) def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int: self.pixel_values.to(lowerCamelCase_ ) return self return Out() return extract def A__ ( self: Union[str, Any] ) -> Tuple: UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : int = self.dummy_cond_unet UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : str = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : str = 
torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : List[Any] = output.images UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Dict = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : int = image[0, -3:, -3:, -1] UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.dummy_cond_unet UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: str ) -> Dict: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert isinstance(pipe.scheduler ,lowerCamelCase_ ) assert pipe.safety_checker is None UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ ) # sanity 
check that the pipeline still works assert pipe.safety_checker is None UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : Tuple = self.dummy_cond_unet UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 UpperCAmelCase_ : Optional[Any] = unet.half() UpperCAmelCase_ : Optional[int] = vae.half() UpperCAmelCase_ : int = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ) -> List[Any]: UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : str = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) UpperCAmelCase_ : Optional[int] = 4003660346 UpperCAmelCase_ : int = 7 # without safety guidance (sld_guidance_scale = 0) UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Optional[int] = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Any = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) 
UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity""" UpperCAmelCase_ : List[Any] = 2734971755 UpperCAmelCase_ : Optional[Any] = 7 UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.""" """ leyendecker""" ) UpperCAmelCase_ : Optional[Any] = 1044355234 UpperCAmelCase_ : List[str] = 12 UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Any = output.images UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : List[str] = output.images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
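# --- Usage sketch (not part of the original script) ---
# Calling the converter programmatically instead of via the CLI; both paths
# below are placeholders.
model = convert_fairseq_xglm_checkpoint_from_disk("path/to/model.pt")
model.save_pretrained("xglm-converted")

# The dumped folder can then be reloaded like any transformers checkpoint:
from transformers import XGLMForCausalLM

reloaded = XGLMForCausalLM.from_pretrained("xglm-converted")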
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput UpperCamelCase_ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _snake_case ( __snake_case ): '''simple docstring''' def __init__( self: Tuple ,*lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int=None ,lowerCamelCase_: str=None ,lowerCamelCase_: Any=None ,**lowerCamelCase_: Optional[int] ) -> List[str]: super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = eval_examples UpperCAmelCase_ : str = post_process_function UpperCAmelCase_ : str = quant_trainer_args UpperCAmelCase_ : List[Any] = 128 # default number of calibration samples def A__ ( self: str ,lowerCamelCase_: Optional[int]=None ) -> Union[str, Any]: if calib_dataset is None and self.calib_dataset is None: raise ValueError("""Trainer: calibration requires an calib_dataset.""" ) UpperCAmelCase_ : Union[str, Any] = calib_dataset if calib_dataset is not None else self.calib_dataset UpperCAmelCase_ : Optional[int] = self._remove_unused_columns(lowerCamelCase_ ,description="""Calibration""" ) return DataLoader( lowerCamelCase_ ,batch_size=self.args.eval_batch_size ,collate_fn=self.data_collator ,drop_last=self.args.dataloader_drop_last ,num_workers=self.args.dataloader_num_workers ,pin_memory=self.args.dataloader_pin_memory ,shuffle=lowerCamelCase_ ,) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=None ) -> str: UpperCAmelCase_ : Tuple = self.train_dataset if calib_dataset is None else calib_dataset UpperCAmelCase_ : Tuple = self.get_calib_dataloader(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = self.model quant_trainer.configure_model(lowerCamelCase_ ,self.quant_trainer_args ,calib=lowerCamelCase_ ) model.eval() quant_trainer.enable_calibration(lowerCamelCase_ ) logger.info("""***** Running calibration *****""" ) logger.info(F''' Num examples = {self.calib_num}''' ) logger.info(F''' Batch size = {calib_dataloader.batch_size}''' ) for step, inputs in enumerate(lowerCamelCase_ ): # Prediction step UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.prediction_step(lowerCamelCase_ ,lowerCamelCase_ ,prediction_loss_only=lowerCamelCase_ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(lowerCamelCase_ ,self.quant_trainer_args ) UpperCAmelCase_ : int = model def A__ ( self: str ,lowerCamelCase_: Optional[int]=None ,lowerCamelCase_: Optional[Any]=None ,lowerCamelCase_: int=None ,lowerCamelCase_: str = "eval" ) -> int: UpperCAmelCase_ : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset UpperCAmelCase_ : Optional[Any] = self.get_eval_dataloader(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
UpperCAmelCase_ : int = self.compute_metrics UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase_ : Dict = eval_loop( lowerCamelCase_ ,description="""Evaluation""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=lowerCamelCase_ ,) finally: UpperCAmelCase_ : Optional[Any] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: UpperCAmelCase_ : List[str] = self.post_process_function(lowerCamelCase_ ,lowerCamelCase_ ,output.predictions ) UpperCAmelCase_ : Dict = self.compute_metrics(lowerCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): UpperCAmelCase_ : int = metrics.pop(lowerCamelCase_ ) self.log(lowerCamelCase_ ) else: UpperCAmelCase_ : Optional[int] = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCAmelCase_ : List[Any] = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,lowerCamelCase_ ) return metrics def A__ ( self: str ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]=None ,lowerCamelCase_: str = "test" ) -> Tuple: UpperCAmelCase_ : List[str] = self.get_test_dataloader(lowerCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase_ : List[Any] = self.compute_metrics UpperCAmelCase_ : Any = None UpperCAmelCase_ : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase_ : Dict = eval_loop( lowerCamelCase_ ,description="""Prediction""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=lowerCamelCase_ ,) finally: UpperCAmelCase_ : str = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output UpperCAmelCase_ : List[str] = self.post_process_function(lowerCamelCase_ ,lowerCamelCase_ ,output.predictions ,"""predict""" ) UpperCAmelCase_ : List[Any] = self.compute_metrics(lowerCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): UpperCAmelCase_ : Optional[Any] = metrics.pop(lowerCamelCase_ ) return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=lowerCamelCase_ ) def A__ ( self: List[Any] ,lowerCamelCase_: Any="./" ) -> str: UpperCAmelCase_ : Optional[Any] = self.eval_dataset UpperCAmelCase_ : Dict = self.get_eval_dataloader(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = next(iter(lowerCamelCase_ ) ) # saving device - to make it consistent UpperCAmelCase_ : Optional[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) # convert to tuple UpperCAmelCase_ : Optional[Any] = tuple(v.to(lowerCamelCase_ ) for k, v in batch.items() ) logger.info("""Converting model to be onnx compatible""" ) from pytorch_quantization.nn import TensorQuantizer UpperCAmelCase_ : str = True UpperCAmelCase_ : Dict = self.model.to(lowerCamelCase_ ) model.eval() model.float() UpperCAmelCase_ : List[str] = model.module if hasattr(lowerCamelCase_ ,"""module""" ) else model quant_trainer.configure_model(lowerCamelCase_ ,self.quant_trainer_args ) UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ 
,"""model.onnx""" ) logger.info(F'''exporting model to {output_model_file}''' ) UpperCAmelCase_ : Any = {0: """batch_size""", 1: """seq_len"""} torch.onnx.export( lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,export_params=lowerCamelCase_ ,opset_version=13 ,do_constant_folding=lowerCamelCase_ ,input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] ,output_names=["""output_start_logits""", """output_end_logits"""] ,dynamic_axes={ """input_ids""": axes, """attention_mask""": axes, """token_type_ids""": axes, """output_start_logits""": axes, """output_end_logits""": axes, } ,verbose=lowerCamelCase_ ,) logger.info("""onnx export finished""" )
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : '''simple docstring''' def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str: UpperCAmelCase_ : List[Any] = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : str = patch_size UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : Dict = embed_dim UpperCAmelCase_ : Dict = hidden_sizes UpperCAmelCase_ : str = depths UpperCAmelCase_ : int = num_heads UpperCAmelCase_ : List[Any] = window_size UpperCAmelCase_ : Union[str, Any] = mlp_ratio UpperCAmelCase_ : int = qkv_bias UpperCAmelCase_ : List[str] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Optional[int] = drop_path_rate UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : List[Any] = use_absolute_embeddings UpperCAmelCase_ : List[Any] = patch_norm UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Optional[Any] = scope UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Optional[int] = encoder_stride UpperCAmelCase_ : Optional[int] = out_features UpperCAmelCase_ : Optional[int] = out_indices def A__ ( self: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : int = None if self.use_labels: UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Any = self.get_config() return config, pixel_values, labels def A__ ( self: List[Any] ) -> Tuple: return FocalNetConfig( image_size=self.image_size ,patch_size=self.patch_size 
,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]: UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ) UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]: UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] ) # verify backbone works with out_features=None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : int = 1 UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int: UpperCAmelCase_ : List[Any] = self.type_sequence_label_size UpperCAmelCase_ : int = 
FocalNetForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : List[str] = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def A__ ( self: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs UpperCAmelCase_ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[Any] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) A__ : Union[str, Any] = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) A__ : Optional[Any] = False A__ : Any = False A__ : List[str] = False A__ : Any = False A__ : Any = False def A__ ( self: List[str] ) -> Tuple: UpperCAmelCase_ : Dict = FocalNetModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ ) def A__ ( self: List[str] ) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self: List[str] ) -> Union[str, Any]: return def A__ ( self: str ) -> List[str]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def A__ ( self: Tuple ) -> int: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def A__ ( self: Dict ) -> List[str]: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def A__ ( self: int ) -> int: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def A__ ( self: int ) -> Dict: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def A__ ( self: Optional[Any] ) -> Optional[Any]: pass def A__ ( self: Optional[Any] ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase_ : List[Any] = model.get_output_embeddings() 
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) ) def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : str = model_class(lowerCamelCase_ ) UpperCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Any = [*signature.parameters.keys()] UpperCAmelCase_ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]: UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Any = outputs.hidden_states UpperCAmelCase_ : List[Any] = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # FocalNet has a different seq_length UpperCAmelCase_ : int = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape UpperCAmelCase_ : List[Any] = ( reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def A__ ( self: Any ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : str = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Union[str, Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[str] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % 
patch_size[1]) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) @slow def A__ ( self: Optional[int] ) -> Optional[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def A__ ( self: Optional[int] ) -> str: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def A__ ( self: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = self.default_image_processor UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Dict = model(**lowerCamelCase_ ) # verify the logits UpperCAmelCase_ : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() ,281 ) @require_torch class _snake_case ( __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else () A__ : int = FocalNetConfig A__ : List[str] = False def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : str = FocalNetModelTester(self )
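# --- Usage sketch (not part of the test file) ---
# Minimal inference example mirroring the integration test above; the image
# path is a placeholder.
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

image = Image.open("cat.png")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])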
from __future__ import annotations


class Node:
    """A Node has a data variable and pointers to its left and right nodes."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Recursive function that returns the depth of a binary tree."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """Returns True if every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
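# --- Quick negative example (not part of the original module) ---
# A node with exactly one child makes the tree not full, while the depth is
# still the longest root-to-leaf path.
def _demo() -> None:
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)  # node 2 now has only a left child
    print(is_full_binary_tree(root))  # False
    print(depth_of_tree(root))  # 3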
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : '''simple docstring''' def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]: UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : int = image_size UpperCAmelCase_ : Union[str, Any] = patch_size UpperCAmelCase_ : Optional[Any] = num_channels UpperCAmelCase_ : int = embed_dim UpperCAmelCase_ : Union[str, Any] = depths UpperCAmelCase_ : List[str] = num_heads UpperCAmelCase_ : int = window_size UpperCAmelCase_ : List[str] = mlp_ratio UpperCAmelCase_ : Tuple = qkv_bias UpperCAmelCase_ : Tuple = hidden_dropout_prob UpperCAmelCase_ : str = attention_probs_dropout_prob UpperCAmelCase_ : Tuple = drop_path_rate UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : int = use_absolute_embeddings UpperCAmelCase_ : Any = patch_norm UpperCAmelCase_ : Optional[int] = layer_norm_eps UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Dict = scope UpperCAmelCase_ : int = use_labels UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[str] = encoder_stride def A__ ( self: Any ) -> int: UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : List[Any] = None if self.use_labels: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : str = self.get_config() return config, pixel_values, labels def A__ ( self: List[Any] ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act 
,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str: UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int: UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : str = 1 UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : int = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int: UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def A__ ( self: str ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Tuple = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) A__ : Optional[Any] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) A__ : List[Any] = False A__ : Tuple = False A__ : int = False A__ : Union[str, Any] = False def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : Any = SwinvaModelTester(self ) UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ) def A__ ( self: Optional[int] ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self: Any ) -> Dict: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*lowerCamelCase_ ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def A__ ( self: int ) -> Dict: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def A__ ( self: Tuple ) -> List[str]: pass def A__ ( self: str ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase_ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ ) UpperCAmelCase_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : int = [*signature.parameters.keys()] UpperCAmelCase_ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Any = True for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Union[str, Any] = False UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Optional[Any] = outputs.attentions UpperCAmelCase_ : List[str] = len(self.model_tester.depths ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase_ : str = True UpperCAmelCase_ : Optional[Any] = config.window_size**2 UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : List[Any] = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ ) # Check attention is always last and order is fine UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : List[Any] = True UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) if hasattr(self.model_tester ,"""num_hidden_states_types""" ): UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states UpperCAmelCase_ : List[str] = 2 self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) ) UpperCAmelCase_ : Any = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, 
window_size_squared] ,) def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]: UpperCAmelCase_ : str = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : List[str] = outputs.hidden_states UpperCAmelCase_ : Optional[Any] = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # Swinv2 has a different seq_length UpperCAmelCase_ : Optional[Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape UpperCAmelCase_ : Optional[Any] = ( reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def A__ ( self: Any ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCAmelCase_ : Any = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : str = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Union[str, Any] = 3 UpperCAmelCase_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : List[str] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : List[str] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) def A__ ( self: Optional[int] ) -> str: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def A__ ( self: str ) -> Tuple: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__ ( self: Any ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def A__ ( self: Dict ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def A__ ( self: str ) -> List[Any]: UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( lowerCamelCase_ ) UpperCAmelCase_ : Any = self.default_image_processor UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ) # verify the logits UpperCAmelCase_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
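# --- Usage sketch (not part of the test file) ---
# Requesting the attention maps and hidden states that the tests above verify.
# The upstream class is Swinv2Model (this dump renders it as SwinvaModel); the
# random tensor stands in for a processed 256x256 image.
import torch
from transformers import Swinv2Model

model = Swinv2Model.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
pixel_values = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    outputs = model(pixel_values, output_attentions=True, output_hidden_states=True)
print(len(outputs.attentions), tuple(outputs.attentions[0].shape))
print(len(outputs.hidden_states))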
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
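# --- Migration sketch (not part of the original file) ---
# The shim above keeps old imports working (with a FutureWarning); new code
# should use the image processor directly. The checkpoint below is one public
# GLPN model, used purely for illustration.
from transformers import GLPNImageProcessor

processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")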
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED UpperCamelCase_ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } UpperCamelCase_ = { '''allenai/led-base-16384''': 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : int = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCAmelCase_ : Dict = bs[:] UpperCAmelCase_ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(_a ) cs.append(2**8 + n ) n += 1 UpperCAmelCase_ : Any = [chr(_a ) for n in cs] return dict(zip(_a , _a ) ) def lowerCamelCase_ ( _a : List[str] ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = set() UpperCAmelCase_ : List[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase_ : Optional[int] = char return pairs class _snake_case ( __snake_case ): '''simple docstring''' A__ : str = VOCAB_FILES_NAMES A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Optional[int] = ["input_ids", "attention_mask"] def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any: UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,) with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle: UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : Any = errors # how to handle errors in decoding UpperCAmelCase_ : int = bytes_to_unicode() UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle: UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1] UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges] UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) ) UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def A__ ( self: List[str] ) -> List[str]: return len(self.encoder ) def A__ ( self: Any ) -> Union[str, Any]: return dict(self.encoder ,**self.added_tokens_encoder ) def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]: if token in self.cache: return self.cache[token] UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : List[str] = 0 while i < len(lowerCamelCase_ ): try: UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase_ : Union[str, Any] = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ ) UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = word return word def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]: UpperCAmelCase_ : str = [] for token in re.findall(self.pat ,lowerCamelCase_ ): UpperCAmelCase_ : List[Any] = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]: return 
self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) ) def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]: return self.decoder.get(lowerCamelCase_ ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]: UpperCAmelCase_ : str = """""".join(lowerCamelCase_ ) UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : List[Any] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase_ : List[str] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCAmelCase_ : str = 0 with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) UpperCAmelCase_ : Tuple = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] UpperCAmelCase_ : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Optional[Any] = [self.sep_token_id] UpperCAmelCase_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCAmelCase_ : Dict = """ """ + text return (text, kwargs) def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: 
Optional[bool] = None ,) -> dict: UpperCAmelCase_ : Optional[int] = super()._pad( encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,) # Load from model defaults if return_attention_mask is None: UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCAmelCase_ : str = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
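A minimal, self-contained sketch of the `global_attention_mask` padding rule implemented by `_pad` above, assuming plain Python lists: `0` means local attention and `1` means global attention, so padded positions receive `-1` ("not a real token") rather than `0`. The helper name is illustrative, not part of the library.

def pad_global_attention_mask(mask, target_length, padding_side="right"):
    # mirror the right/left padding branches above, using -1 as the pad value
    difference = target_length - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0], 4, padding_side="left") == [-1, -1, 1, 0]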
345
1
import re import string import numpy as np import datasets UpperCamelCase_ = ''' Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. ''' UpperCamelCase_ = ''' Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all digits before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 25.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 50.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 75.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results["exact_match"], 1)) 100.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."] >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 33.3 ''' UpperCamelCase_ = ''' ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): '''simple docstring''' def A__ ( self: str ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ,id="""sequence""" ), """references""": datasets.Value("""string""" ,id="""sequence""" ), } ) ,reference_urls=[] ,) def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=None
,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Tuple=False ,lowerCamelCase_: Tuple=False ,) -> str: if regexes_to_ignore is not None: for s in regexes_to_ignore: UpperCAmelCase_ : int = np.array([re.sub(lowerCamelCase_ ,"""""" ,lowerCamelCase_ ) for x in predictions] ) UpperCAmelCase_ : Optional[Any] = np.array([re.sub(lowerCamelCase_ ,"""""" ,lowerCamelCase_ ) for x in references] ) else: UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ ) UpperCAmelCase_ : Any = np.asarray(lowerCamelCase_ ) if ignore_case: UpperCAmelCase_ : Dict = np.char.lower(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = np.char.lower(lowerCamelCase_ ) if ignore_punctuation: UpperCAmelCase_ : Union[str, Any] = string.punctuation.maketrans("""""" ,"""""" ,string.punctuation ) UpperCAmelCase_ : Optional[int] = np.char.translate(lowerCamelCase_ ,table=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = np.char.translate(lowerCamelCase_ ,table=lowerCamelCase_ ) if ignore_numbers: UpperCAmelCase_ : List[Any] = string.digits.maketrans("""""" ,"""""" ,string.digits ) UpperCAmelCase_ : Optional[int] = np.char.translate(lowerCamelCase_ ,table=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = np.char.translate(lowerCamelCase_ ,table=lowerCamelCase_ ) UpperCAmelCase_ : Dict = predictions == references return {"exact_match": np.mean(lowerCamelCase_ ) * 100}
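A dependency-light sketch of the normalization pipeline in the `_compute` method above, assuming only numpy; the function name `simple_exact_match` and the reduced option set are illustrative, not part of the metric's API.

import string
import numpy as np

def simple_exact_match(predictions, references, ignore_case=False, ignore_punctuation=False):
    preds, refs = np.asarray(predictions), np.asarray(references)
    if ignore_case:
        preds, refs = np.char.lower(preds), np.char.lower(refs)
    if ignore_punctuation:
        table = str.maketrans("", "", string.punctuation)
        preds = np.char.translate(preds, table=table)
        refs = np.char.translate(refs, table=table)
    # elementwise string equality, averaged and scaled to a percentage
    return float(np.mean(preds == refs) * 100)

assert simple_exact_match(["Cat!", "dog"], ["cat", "bird"], ignore_case=True, ignore_punctuation=True) == 50.0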
345
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Union[str, Any] ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Optional[Any] = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ ) return image @property def A__ ( self: List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : int = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) return model @property def A__ ( self: str ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) return model @property def A__ ( self: Optional[int] ) -> int: torch.manual_seed(0 ) UpperCAmelCase_ : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModel(lowerCamelCase_ ) @property def A__ ( self: Tuple ) -> Tuple: def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ): class _snake_case : '''simple docstring''' def __init__( self: List[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = torch.ones([0] ) def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int: self.pixel_values.to(lowerCamelCase_ ) return self return Out() return extract def A__ ( self: Union[str, Any] ) -> Tuple: UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : int = self.dummy_cond_unet UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : str = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : str = 
torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : List[Any] = output.images UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Dict = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : int = image[0, -3:, -3:, -1] UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.dummy_cond_unet UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: str ) -> Dict: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert isinstance(pipe.scheduler ,lowerCamelCase_ ) assert pipe.safety_checker is None UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ ) # sanity 
check that the pipeline still works assert pipe.safety_checker is None UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : Tuple = self.dummy_cond_unet UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 UpperCAmelCase_ : Optional[Any] = unet.half() UpperCAmelCase_ : Optional[int] = vae.half() UpperCAmelCase_ : int = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ) -> List[Any]: UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : str = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) UpperCAmelCase_ : Optional[int] = 4003660346 UpperCAmelCase_ : int = 7 # without safety guidance (sld_guidance_scale = 0) UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Optional[int] = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Any = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) 
UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity""" UpperCAmelCase_ : List[Any] = 2734971755 UpperCAmelCase_ : Optional[Any] = 7 UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.""" """ leyendecker""" ) UpperCAmelCase_ : Optional[Any] = 1044355234 UpperCAmelCase_ : List[str] = 12 UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Any = output.images UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : List[str] = output.images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
345
1
import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCamelCase_ ( _a : Union[List, PIL.Image.Image, torch.Tensor] ): '''simple docstring''' warnings.warn( """The preprocess method is deprecated and will be removed in a future version. Please""" """ use VaeImageProcessor.preprocess instead""" , _a , ) if isinstance(_a , torch.Tensor ): return image elif isinstance(_a , PIL.Image.Image ): UpperCAmelCase_ : List[str] = [image] if isinstance(image[0] , PIL.Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ : Tuple = image[0].size UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 UpperCAmelCase_ : Optional[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] UpperCAmelCase_ : Union[str, Any] = np.concatenate(_a , axis=0 ) UpperCAmelCase_ : str = np.array(_a ).astype(np.floataa ) / 2_5_5.0 UpperCAmelCase_ : Any = image.transpose(0 , 3 , 1 , 2 ) UpperCAmelCase_ : Any = 2.0 * image - 1.0 UpperCAmelCase_ : Tuple = torch.from_numpy(_a ) elif isinstance(image[0] , torch.Tensor ): UpperCAmelCase_ : Tuple = torch.cat(_a , dim=0 ) return image def lowerCamelCase_ ( _a : Union[List, PIL.Image.Image, torch.Tensor] ): '''simple docstring''' if isinstance(_a , torch.Tensor ): return mask elif isinstance(_a , PIL.Image.Image ): UpperCAmelCase_ : Dict = [mask] if isinstance(mask[0] , PIL.Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = mask[0].size UpperCAmelCase_ , UpperCAmelCase_ : int = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCAmelCase_ : Optional[Any] = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask] UpperCAmelCase_ : Optional[Any] = np.concatenate(_a , axis=0 ) UpperCAmelCase_ : Tuple = mask.astype(np.floataa ) / 2_5_5.0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Optional[int] = torch.from_numpy(_a ) elif isinstance(mask[0] , torch.Tensor ): UpperCAmelCase_ : List[Any] = torch.cat(_a , dim=0 ) return mask class _snake_case ( __snake_case ): '''simple docstring''' A__ : UNetaDModel A__ : RePaintScheduler def __init__( self: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: str ) -> List[str]: super().__init__() self.register_modules(unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ) @torch.no_grad() def __call__( self: int ,lowerCamelCase_: Union[torch.Tensor, PIL.Image.Image] ,lowerCamelCase_: Union[torch.Tensor, PIL.Image.Image] ,lowerCamelCase_: int = 250 ,lowerCamelCase_: float = 0.0 ,lowerCamelCase_: int = 10 ,lowerCamelCase_: int = 10 ,lowerCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase_: Optional[str] = "pil" ,lowerCamelCase_: bool = True ,) -> Union[ImagePipelineOutput, Tuple]: UpperCAmelCase_ : Any = image UpperCAmelCase_ : Any = _preprocess_image(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = original_image.to(device=self.device ,dtype=self.unet.dtype ) UpperCAmelCase_ : Dict = _preprocess_mask(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = mask_image.to(device=self.device ,dtype=self.unet.dtype ) UpperCAmelCase_ : List[Any] = original_image.shape[0] # sample 
gaussian noise to begin the loop if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) UpperCAmelCase_ : str = original_image.shape UpperCAmelCase_ : Optional[int] = randn_tensor(lowerCamelCase_ ,generator=lowerCamelCase_ ,device=self.device ,dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,self.device ) UpperCAmelCase_ : Union[str, Any] = eta UpperCAmelCase_ : List[str] = self.scheduler.timesteps[0] + 1 UpperCAmelCase_ : List[str] = generator[0] if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual UpperCAmelCase_ : List[str] = self.unet(lowerCamelCase_ ,lowerCamelCase_ ).sample # compute previous image: x_t -> x_t-1 UpperCAmelCase_ : Optional[Any] = self.scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t UpperCAmelCase_ : List[Any] = self.scheduler.undo_step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Any = t UpperCAmelCase_ : Dict = (image / 2 + 0.5).clamp(0 ,1 ) UpperCAmelCase_ : Dict = image.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": UpperCAmelCase_ : Optional[int] = self.numpy_to_pil(lowerCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase_ )
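A minimal sketch of the binary-mask convention used by `_preprocess_mask` above: grayscale values are scaled to [0, 1] and thresholded at 0.5, so each pixel becomes a hard keep/inpaint decision before the pipeline combines it with the latents. The helper name is illustrative.

import numpy as np

def binarize_mask(mask_uint8):
    mask = mask_uint8.astype(np.float32) / 255.0
    mask[mask < 0.5] = 0.0
    mask[mask >= 0.5] = 1.0
    return mask

m = binarize_mask(np.array([0, 100, 127, 128, 255], dtype=np.uint8))
assert m.tolist() == [0.0, 0.0, 0.0, 1.0, 1.0]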
345
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _snake_case : '''simple docstring''' def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]: UpperCAmelCase_ : Any = parent UpperCAmelCase_ : List[Any] = batch_size UpperCAmelCase_ : Union[str, Any] = seq_length UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Dict = use_input_mask UpperCAmelCase_ : Any = use_token_type_ids UpperCAmelCase_ : Tuple = use_labels UpperCAmelCase_ : List[Any] = vocab_size UpperCAmelCase_ : str = hidden_size UpperCAmelCase_ : List[str] = embedding_size UpperCAmelCase_ : List[Any] = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : Tuple = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : List[str] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : List[str] = type_vocab_size UpperCAmelCase_ : Any = type_sequence_label_size UpperCAmelCase_ : Optional[Any] = initializer_range UpperCAmelCase_ : Optional[int] = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : List[str] = scope def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : List[str] = None if self.use_input_mask: UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Dict = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) UpperCAmelCase_ : int = None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase_ : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def 
A__ ( self: Any ) -> Dict: return MobileBertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,) def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int: UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int: UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int: UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]: UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ ) 
model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str: UpperCAmelCase_ : Optional[Any] = self.num_labels UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any: UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = self.num_choices UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : Optional[int] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def A__ ( self: List[str] ) -> str: UpperCAmelCase_ : str = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Union[str, Any] = config_and_inputs UpperCAmelCase_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Dict = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) A__ : List[str] = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, 
"question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) A__ : List[str] = True def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: int=False ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ ) if return_labels: if model_class in get_values(lowerCamelCase_ ): UpperCAmelCase_ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ ) return inputs_dict def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : List[str] = MobileBertModelTester(self ) UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 ) def A__ ( self: Optional[Any] ) -> List[Any]: self.config_tester.run_common_tests() def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ ) def A__ ( self: Optional[int] ) -> Optional[int]: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Tuple: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ ) def A__ ( self: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ ) def A__ ( self: Optional[int] ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ ) def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( _a : Union[str, Any] ): '''simple docstring''' return torch.tensor( _a , dtype=torch.long , device=_a , ) UpperCamelCase_ = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _snake_case ( unittest.TestCase ): '''simple docstring''' @slow def A__ ( self: List[Any] ) -> str: UpperCAmelCase_ : Any = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : str = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )[0] UpperCAmelCase_ : Union[str, Any] = torch.Size((1, 9, 512) ) self.assertEqual(output.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Tuple = 
torch.tensor( [ [ [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05], [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00], [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01], ] ] ,device=lowerCamelCase_ ,) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
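The integration test above compares by ratio rather than by absolute difference because MobileBERT activations span roughly 10e0 to 10e8, as its comment explains. A standalone sketch of that bound check, assuming torch and nonzero outputs:

import torch

def within_relative_tolerance(expected, actual, tolerance=1e-3):
    # expected / actual should be elementwise ~1 when the tensors agree
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))

a = torch.tensor([1e8, 2.0, -3.0])
assert within_relative_tolerance(a * (1 + 5e-4), a)
assert not within_relative_tolerance(a * 1.01, a)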
345
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def lowerCamelCase_ ( _a : str=None ): '''simple docstring''' if subparsers is not None: UpperCAmelCase_ : Union[str, Any] = subparsers.add_parser("""test""" ) else: UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser("""Accelerate test command""" ) parser.add_argument( """--config_file""" , default=_a , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , ) if subparsers is not None: parser.set_defaults(func=_a ) return parser def lowerCamelCase_ ( _a : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Any = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] ) if args.config_file is None: UpperCAmelCase_ : Union[str, Any] = script_name else: UpperCAmelCase_ : Optional[int] = F'''--config_file={args.config_file} {script_name}''' UpperCAmelCase_ : Optional[int] = ["""accelerate-launch"""] + test_args.split() UpperCAmelCase_ : Union[str, Any] = execute_subprocess_async(_a , env=os.environ.copy() ) if result.returncode == 0: print("""Test is a success! You are ready for your distributed training!""" ) def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : Tuple = test_command_parser() UpperCAmelCase_ : Union[str, Any] = parser.parse_args() test_command(_a ) if __name__ == "__main__": main()
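Concretely, `accelerate test` boils down to spawning `accelerate-launch` on a bundled test script, optionally prefixed with `--config_file`. A sketch of the argument assembly performed in `test_command` above; the install path shown is illustrative.

script_name = "/usr/lib/python3/site-packages/accelerate/test_utils/scripts/test_script.py"
config_file = None  # or the value passed via --config_file
test_args = script_name if config_file is None else f"--config_file={config_file} {script_name}"
cmd = ["accelerate-launch"] + test_args.split()
assert cmd == ["accelerate-launch", script_name]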
345
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: str ) -> int: UpperCAmelCase_ : List[Any] = """ylacombe/bark-small""" UpperCAmelCase_ : Tuple = tempfile.mkdtemp() UpperCAmelCase_ : Union[str, Any] = """en_speaker_1""" UpperCAmelCase_ : Optional[Any] = """This is a test string""" UpperCAmelCase_ : int = """speaker_embeddings_path.json""" UpperCAmelCase_ : Any = """speaker_embeddings""" def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]: return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ ) def A__ ( self: str ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def A__ ( self: List[Any] ) -> int: UpperCAmelCase_ : int = self.get_tokenizer() UpperCAmelCase_ : Tuple = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) @slow def A__ ( self: List[Any] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) processor.save_pretrained( self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,) UpperCAmelCase_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained( self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) UpperCAmelCase_ : Optional[int] = 35 UpperCAmelCase_ : Optional[int] = 2 UpperCAmelCase_ : Dict = 8 UpperCAmelCase_ : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset UpperCAmelCase_ : str = processor(text=self.input_string ,voice_preset=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() ) # test loading voice preset from npz file UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,"""file.npz""" ) np.savez(lowerCamelCase_ ,**lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=lowerCamelCase_ ) UpperCAmelCase_ : int = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() ) # test loading voice preset from the hub UpperCAmelCase_ : Union[str, Any] = processor(text=self.input_string ,voice_preset=self.voice_preset ) def A__ ( self: Dict ) -> Tuple: UpperCAmelCase_ : Any = self.get_tokenizer() UpperCAmelCase_ : Dict = 
BarkProcessor(tokenizer=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ) UpperCAmelCase_ : str = tokenizer( self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
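The `voice_preset` exercised above is just three aligned numpy arrays keyed by prompt type, and the npz round trip in the test relies only on standard numpy I/O. A self-contained sketch using the test's own illustrative shapes (seq_len=35, 2 coarse codebooks, 8 total):

import os
import tempfile
import numpy as np

seq_len, nb_codebooks_coarse, nb_codebooks_total = 35, 2, 8
voice_preset = {
    "semantic_prompt": np.ones(seq_len),
    "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
    "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
}
path = os.path.join(tempfile.mkdtemp(), "file.npz")
np.savez(path, **voice_preset)
loaded = np.load(path)
assert loaded["coarse_prompt"].shape == (nb_codebooks_coarse, seq_len)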
345
1
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def lowerCamelCase_ ( _a : str , _a : Dict ): '''simple docstring''' UpperCAmelCase_ : str = checkpoint UpperCAmelCase_ : Any = {} UpperCAmelCase_ : Optional[Any] = vae_state_dict["""encoder.conv_in.weight"""] UpperCAmelCase_ : Optional[Any] = vae_state_dict["""encoder.conv_in.bias"""] UpperCAmelCase_ : Union[str, Any] = vae_state_dict["""encoder.conv_out.weight"""] UpperCAmelCase_ : List[Any] = vae_state_dict["""encoder.conv_out.bias"""] UpperCAmelCase_ : Tuple = vae_state_dict["""encoder.norm_out.weight"""] UpperCAmelCase_ : Tuple = vae_state_dict["""encoder.norm_out.bias"""] UpperCAmelCase_ : Optional[int] = vae_state_dict["""decoder.conv_in.weight"""] UpperCAmelCase_ : Optional[Any] = vae_state_dict["""decoder.conv_in.bias"""] UpperCAmelCase_ : Tuple = vae_state_dict["""decoder.conv_out.weight"""] UpperCAmelCase_ : Tuple = vae_state_dict["""decoder.conv_out.bias"""] UpperCAmelCase_ : Optional[Any] = vae_state_dict["""decoder.norm_out.weight"""] UpperCAmelCase_ : str = vae_state_dict["""decoder.norm_out.bias"""] UpperCAmelCase_ : Dict = vae_state_dict["""quant_conv.weight"""] UpperCAmelCase_ : Union[str, Any] = vae_state_dict["""quant_conv.bias"""] UpperCAmelCase_ : Tuple = vae_state_dict["""post_quant_conv.weight"""] UpperCAmelCase_ : List[str] = vae_state_dict["""post_quant_conv.bias"""] # Retrieves the keys for the encoder down blocks only UpperCAmelCase_ : str = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} ) UpperCAmelCase_ : List[str] = { layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(_a ) } # Retrieves the keys for the decoder up blocks only UpperCAmelCase_ : str = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} ) UpperCAmelCase_ : List[str] = { layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(_a ) } for i in range(_a ): UpperCAmelCase_ : Optional[int] = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key] if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: UpperCAmelCase_ : Optional[Any] = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.weight''' ) UpperCAmelCase_ : Any = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.bias''' ) UpperCAmelCase_ : Optional[Any] = renew_vae_resnet_paths(_a ) UpperCAmelCase_ : Tuple = {"""old""": F'''down.{i}.block''', """new""": F'''down_blocks.{i}.resnets'''} assign_to_checkpoint(_a , _a , _a , additional_replacements=[meta_path] , config=_a ) UpperCAmelCase_ : List[Any] = [key for key in vae_state_dict if """encoder.mid.block""" in key] UpperCAmelCase_ : Tuple = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ : Any = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key] UpperCAmelCase_ : Union[str, Any] = renew_vae_resnet_paths(_a ) UpperCAmelCase_ : Any = {"""old""": F'''mid.block_{i}''', """new""": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_a , _a , _a , additional_replacements=[meta_path] , config=_a ) UpperCAmelCase_ : int = [key for key in vae_state_dict if """encoder.mid.attn""" in key] 
UpperCAmelCase_ : str = renew_vae_attention_paths(_a ) UpperCAmelCase_ : str = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(_a , _a , _a , additional_replacements=[meta_path] , config=_a ) conv_attn_to_linear(_a ) for i in range(_a ): UpperCAmelCase_ : Optional[Any] = num_up_blocks - 1 - i UpperCAmelCase_ : List[Any] = [ key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key ] if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: UpperCAmelCase_ : Optional[int] = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.weight''' ] UpperCAmelCase_ : Optional[Any] = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.bias''' ] UpperCAmelCase_ : str = renew_vae_resnet_paths(_a ) UpperCAmelCase_ : Optional[int] = {"""old""": F'''up.{block_id}.block''', """new""": F'''up_blocks.{i}.resnets'''} assign_to_checkpoint(_a , _a , _a , additional_replacements=[meta_path] , config=_a ) UpperCAmelCase_ : Dict = [key for key in vae_state_dict if """decoder.mid.block""" in key] UpperCAmelCase_ : Dict = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ : int = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key] UpperCAmelCase_ : Union[str, Any] = renew_vae_resnet_paths(_a ) UpperCAmelCase_ : int = {"""old""": F'''mid.block_{i}''', """new""": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_a , _a , _a , additional_replacements=[meta_path] , config=_a ) UpperCAmelCase_ : Union[str, Any] = [key for key in vae_state_dict if """decoder.mid.attn""" in key] UpperCAmelCase_ : Dict = renew_vae_attention_paths(_a ) UpperCAmelCase_ : str = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(_a , _a , _a , additional_replacements=[meta_path] , config=_a ) conv_attn_to_linear(_a ) return new_checkpoint def lowerCamelCase_ ( _a : str , _a : str , ): '''simple docstring''' UpperCAmelCase_ : List[str] = requests.get( """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" ) UpperCAmelCase_ : Union[str, Any] = io.BytesIO(r.content ) UpperCAmelCase_ : str = OmegaConf.load(_a ) UpperCAmelCase_ : Union[str, Any] = 512 UpperCAmelCase_ : List[str] = """cuda""" if torch.cuda.is_available() else """cpu""" if checkpoint_path.endswith("""safetensors""" ): from safetensors import safe_open UpperCAmelCase_ : int = {} with safe_open(_a , framework="""pt""" , device="""cpu""" ) as f: for key in f.keys(): UpperCAmelCase_ : Tuple = f.get_tensor(_a ) else: UpperCAmelCase_ : Dict = torch.load(_a , map_location=_a )["""state_dict"""] # Convert the VAE model. UpperCAmelCase_ : Optional[int] = create_vae_diffusers_config(_a , image_size=_a ) UpperCAmelCase_ : List[Any] = custom_convert_ldm_vae_checkpoint(_a , _a ) UpperCAmelCase_ : Any = AutoencoderKL(**_a ) vae.load_state_dict(_a ) vae.save_pretrained(_a ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') UpperCamelCase_ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
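Most of the heavy lifting above is systematic key renaming driven by old/new path pairs such as {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}, applied through assign_to_checkpoint. A minimal sketch of one such substitution on a single key (the key string is illustrative):

def rename_key(key, old, new):
    # one substitution step of the LDM -> diffusers remapping
    return key.replace(old, new)

old_key = "encoder.down.0.block.1.conv1.weight"
new_key = rename_key(old_key, "down.0.block", "down_blocks.0.resnets")
assert new_key == "encoder.down_blocks.0.resnets.1.conv1.weight"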
345
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : str = -1 UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : Optional[int] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: Dict ) -> Optional[Any]: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] ) UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() UpperCAmelCase_ : int = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[Any] ) -> Dict: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :] UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : List[str] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: str ) -> str: # Tests that we can pass 
`decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Any = -1 UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n" UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = -1 UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 ) UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(lowerCamelCase_ ): UpperCAmelCase_ : Union[str, Any] = """""" for new_text in streamer: streamer_text += new_text
345
1
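A minimal sketch of the iterator-streamer pattern the tests above exercise, using the same tiny test checkpoint; generation runs in a worker thread while the main thread consumes decoded text as it is produced:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

# generate() blocks until finished, so it runs in a background thread while
# the streamer yields decoded chunks here.
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
text = "".join(chunk for chunk in streamer)
thread.join()
print(text)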
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def lowerCamelCase_ ( _a : Namespace ): '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) UpperCamelCase_ = ''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class _snake_case ( __snake_case ): '''simple docstring''' @staticmethod def A__ ( lowerCamelCase_: ArgumentParser ) -> Optional[Any]: UpperCAmelCase_ : str = parser.add_parser( """convert""" ,help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" ,) train_parser.add_argument("""--model_type""" ,type=lowerCamelCase_ ,required=lowerCamelCase_ ,help="""Model's type.""" ) train_parser.add_argument( """--tf_checkpoint""" ,type=lowerCamelCase_ ,required=lowerCamelCase_ ,help="""TensorFlow checkpoint path or folder.""" ) train_parser.add_argument( """--pytorch_dump_output""" ,type=lowerCamelCase_ ,required=lowerCamelCase_ ,help="""Path to the PyTorch saved model output.""" ) train_parser.add_argument("""--config""" ,type=lowerCamelCase_ ,default="""""" ,help="""Configuration file path or folder.""" ) train_parser.add_argument( """--finetuning_task_name""" ,type=lowerCamelCase_ ,default=lowerCamelCase_ ,help="""Optional fine-tuning task name if the TF model was a finetuned model.""" ,) train_parser.set_defaults(func=lowerCamelCase_ ) def __init__( self: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ,lowerCamelCase_: str ,lowerCamelCase_: str ,lowerCamelCase_: str ,*lowerCamelCase_: Dict ,) -> List[Any]: UpperCAmelCase_ : Any = logging.get_logger("""transformers-cli/converting""" ) self._logger.info(F'''Loading model {model_type}''' ) UpperCAmelCase_ : Tuple = model_type UpperCAmelCase_ : str = tf_checkpoint UpperCAmelCase_ : Optional[Any] = pytorch_dump_output UpperCAmelCase_ : Union[str, Any] = config UpperCAmelCase_ : int = finetuning_task_name def A__ ( self: Union[str, Any] ) -> Union[str, Any]: if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(lowerCamelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch 
import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase_ ) if "ckpt" in self._tf_checkpoint.lower(): UpperCAmelCase_ : Tuple = self._tf_checkpoint UpperCAmelCase_ : List[str] = """""" else: UpperCAmelCase_ : Dict = self._tf_checkpoint UpperCAmelCase_ : int = """""" convert_transfo_xl_checkpoint_to_pytorch( lowerCamelCase_ ,self._config ,self._pytorch_dump_output ,lowerCamelCase_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output ) else: raise ValueError( """--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
345
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _snake_case ( unittest.TestCase ): '''simple docstring''' @property def A__ ( self: Optional[int] ) -> int: torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model @property def A__ ( self: Tuple ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = VQModel( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,) return model @property def A__ ( self: Tuple ) -> Any: torch.manual_seed(0 ) UpperCAmelCase_ : int = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModel(lowerCamelCase_ ) def A__ ( self: str ) -> Optional[Any]: UpperCAmelCase_ : str = self.dummy_uncond_unet UpperCAmelCase_ : List[Any] = DDIMScheduler() UpperCAmelCase_ : List[Any] = self.dummy_vq_model UpperCAmelCase_ : Optional[int] = LDMPipeline(unet=lowerCamelCase_ ,vqvae=lowerCamelCase_ ,scheduler=lowerCamelCase_ ) ldm.to(lowerCamelCase_ ) ldm.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = torch.manual_seed(0 ) UpperCAmelCase_ : int = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ).images UpperCAmelCase_ : List[str] = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=lowerCamelCase_ )[0] UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : str = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] ) UpperCAmelCase_ : Tuple = 1e-2 if torch_device != """mps""" else 3e-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" ) ldm.to(lowerCamelCase_ ) ldm.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = ldm(generator=lowerCamelCase_ ,num_inference_steps=5 ,output_type="""numpy""" ).images UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) UpperCAmelCase_ : int = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] ) UpperCAmelCase_ : Union[str, Any] = 1e-2 if torch_device != """mps""" else 3e-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
345
1
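Outside the test harness, the unconditional LDM pipeline exercised above reduces to a few lines; the checkpoint name is the one the slow test loads:

import torch

from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
generator = torch.manual_seed(0)
# numpy output yields an array of shape (1, 256, 256, 3), as asserted above
image = pipe(generator=generator, num_inference_steps=5, output_type="numpy").images[0]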
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''', # See all CANINE models at https://huggingface.co/models?filter=canine } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = "canine" def __init__( self: Union[str, Any] ,lowerCamelCase_: str=768 ,lowerCamelCase_: int=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: int="gelu" ,lowerCamelCase_: List[Any]=0.1 ,lowerCamelCase_: List[Any]=0.1 ,lowerCamelCase_: Optional[int]=16384 ,lowerCamelCase_: Optional[Any]=16 ,lowerCamelCase_: Optional[Any]=0.0_2 ,lowerCamelCase_: Tuple=1e-12 ,lowerCamelCase_: Union[str, Any]=0 ,lowerCamelCase_: List[Any]=0xE_0_0_0 ,lowerCamelCase_: Optional[int]=0xE_0_0_1 ,lowerCamelCase_: Optional[Any]=4 ,lowerCamelCase_: Dict=4 ,lowerCamelCase_: Dict=8 ,lowerCamelCase_: int=16384 ,lowerCamelCase_: Union[str, Any]=128 ,**lowerCamelCase_: str ,) -> Tuple: super().__init__(pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ) UpperCAmelCase_ : Tuple = max_position_embeddings UpperCAmelCase_ : List[str] = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : Dict = num_attention_heads UpperCAmelCase_ : str = intermediate_size UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : Dict = initializer_range UpperCAmelCase_ : Dict = type_vocab_size UpperCAmelCase_ : Union[str, Any] = layer_norm_eps # Character config: UpperCAmelCase_ : Tuple = downsampling_rate UpperCAmelCase_ : Union[str, Any] = upsampling_kernel_size UpperCAmelCase_ : List[Any] = num_hash_functions UpperCAmelCase_ : Dict = num_hash_buckets UpperCAmelCase_ : List[str] = local_transformer_stride
345
def topological_sort(graph):
    """Perform topological sorting of the graph using Kahn's algorithm."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
345
1
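For contrast with the print-based sort in the row above, a sketch of the same Kahn's algorithm that returns the ordering, uses a deque for O(1) pops, and raises instead of printing when a cycle is found:

from collections import deque


def topological_order(graph: dict[int, list[int]]) -> list[int]:
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for t in graph[vertex]:
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    if len(order) != len(graph):
        raise ValueError("graph contains a cycle")
    return order


print(topological_order({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}))
# [0, 1, 2, 3, 4, 5]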
from ..utils import DummyObject, requires_backends class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Any = ["flax"] def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Dict = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : List[str] = ["flax"] def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : int = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[int] = ["flax"] def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : List[Any] = ["flax"] def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict: requires_backends(cls ,["""flax"""] ) 
@classmethod def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : str = ["flax"] def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Union[str, Any] = ["flax"] def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[Any] = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[int] = ["flax"] def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]: requires_backends(cls ,["""flax"""] )
345
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''microsoft/swinv2-tiny-patch4-window8-256''': ( '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json''' ), } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = "swinv2" A__ : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: List[str] ,lowerCamelCase_: List[str]=224 ,lowerCamelCase_: List[str]=4 ,lowerCamelCase_: List[Any]=3 ,lowerCamelCase_: Optional[Any]=96 ,lowerCamelCase_: Any=[2, 2, 6, 2] ,lowerCamelCase_: Dict=[3, 6, 12, 24] ,lowerCamelCase_: str=7 ,lowerCamelCase_: Optional[Any]=4.0 ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: str=False ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: Union[str, Any]=1e-5 ,lowerCamelCase_: str=32 ,**lowerCamelCase_: List[str] ,) -> Tuple: super().__init__(**lowerCamelCase_ ) UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : Tuple = patch_size UpperCAmelCase_ : Dict = num_channels UpperCAmelCase_ : List[Any] = embed_dim UpperCAmelCase_ : Dict = depths UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) UpperCAmelCase_ : str = num_heads UpperCAmelCase_ : Tuple = window_size UpperCAmelCase_ : int = mlp_ratio UpperCAmelCase_ : str = qkv_bias UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : int = drop_path_rate UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : List[str] = use_absolute_embeddings UpperCAmelCase_ : Dict = layer_norm_eps UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCAmelCase_ : List[str] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) ) UpperCAmelCase_ : Any = (0, 0, 0, 0)
345
1
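A simplified sketch of the placeholder pattern in the dummy module above (not the library's exact code): every constructor and classmethod funnels into requires_backends, which raises an informative ImportError when the optional backend is missing:

def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class FlaxModelPlaceholder:
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, cls._backends)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, cls._backends)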
import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
345
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: int ) -> str: # A mock response for an HTTP head request to emulate server down UpperCAmelCase_ : List[str] = mock.Mock() UpperCAmelCase_ : List[Any] = 500 UpperCAmelCase_ : Union[str, Any] = {} UpperCAmelCase_ : Union[str, Any] = HTTPError UpperCAmelCase_ : Any = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head: UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def A__ ( self: str ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase_ : str = mock.Mock() UpperCAmelCase_ : Optional[int] = 500 UpperCAmelCase_ : int = {} UpperCAmelCase_ : Union[str, Any] = HTTPError UpperCAmelCase_ : List[Any] = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head: UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def A__ ( self: str ) -> Dict: # This test is for deprecated behavior and can be removed in v5 try: UpperCAmelCase_ : Any = tempfile.mktemp() with open(lowerCamelCase_ ,"""wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ ) UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ ) finally: os.remove(lowerCamelCase_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. 
return try: with open("""tokenizer.json""" ,"""wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ ) UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("""tokenizer.json""" ) def A__ ( self: List[str] ) -> Tuple: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class _snake_case ( unittest.TestCase ): '''simple docstring''' A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def A__ ( cls: Dict ) -> Optional[int]: UpperCAmelCase_ : List[str] = TOKEN HfFolder.save_token(lowerCamelCase_ ) @classmethod def A__ ( cls: Optional[Any] ) -> List[str]: try: delete_repo(token=cls._token ,repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def A__ ( self: Any ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ ) tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def A__ ( self: Optional[int] ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token ) UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) 
@require_tokenizers def A__ ( self: Optional[int] ) -> Optional[Any]: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ ) bert_tokenizer.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" ) class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Any = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def A__ ( self: Tuple ) -> Optional[int]: UpperCAmelCase_ : str = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Dict = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] ) def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[str] = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] ) def A__ ( self: int ) -> 
Union[str, Any]: UpperCAmelCase_ : List[str] = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] ) def A__ ( self: int ) -> List[str]: UpperCAmelCase_ : int = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] ) def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] ) def A__ ( self: List[Any] ) -> Any: # Even if the offsets are wrong, we necessarily output correct string # parts. UpperCAmelCase_ : Tuple = Trie() UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
345
1
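The Trie behaviour the last test class above pins down can be seen directly; this sketch reuses the public class the tests import:

from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")

# The longest added token wins, so the text is not cut at "extra_id_1":
print(trie.split("[CLS] This is a extra_id_100"))
# ['[CLS]', ' This is a ', 'extra_id_100']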
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''microsoft/swinv2-tiny-patch4-window8-256''': ( '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json''' ), } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = "swinv2" A__ : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: List[str] ,lowerCamelCase_: List[str]=224 ,lowerCamelCase_: List[str]=4 ,lowerCamelCase_: List[Any]=3 ,lowerCamelCase_: Optional[Any]=96 ,lowerCamelCase_: Any=[2, 2, 6, 2] ,lowerCamelCase_: Dict=[3, 6, 12, 24] ,lowerCamelCase_: str=7 ,lowerCamelCase_: Optional[Any]=4.0 ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: str=False ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: Union[str, Any]=1e-5 ,lowerCamelCase_: str=32 ,**lowerCamelCase_: List[str] ,) -> Tuple: super().__init__(**lowerCamelCase_ ) UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : Tuple = patch_size UpperCAmelCase_ : Dict = num_channels UpperCAmelCase_ : List[Any] = embed_dim UpperCAmelCase_ : Dict = depths UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) UpperCAmelCase_ : str = num_heads UpperCAmelCase_ : Tuple = window_size UpperCAmelCase_ : int = mlp_ratio UpperCAmelCase_ : str = qkv_bias UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : int = drop_path_rate UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : List[str] = use_absolute_embeddings UpperCAmelCase_ : Dict = layer_norm_eps UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCAmelCase_ : List[str] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) ) UpperCAmelCase_ : Any = (0, 0, 0, 0)
345
from ..utils import DummyObject, requires_backends class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Any = ["flax"] def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Dict = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : List[str] = ["flax"] def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : int = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[int] = ["flax"] def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : List[Any] = ["flax"] def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict: requires_backends(cls ,["""flax"""] ) 
@classmethod def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : str = ["flax"] def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Union[str, Any] = ["flax"] def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[Any] = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[int] = ["flax"] def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]: requires_backends(cls ,["""flax"""] )
345
1
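A quick sketch of instantiating the Swinv2 configuration shown above, using its documented defaults; note how the derived hidden_size follows from the embedding dimension and the number of stages:

from transformers import Swinv2Config

config = Swinv2Config(image_size=224, patch_size=4, embed_dim=96, depths=[2, 2, 6, 2])
# hidden_size is set to int(embed_dim * 2 ** (len(depths) - 1)) in the config body:
assert config.hidden_size == int(96 * 2 ** (len(config.depths) - 1))  # 768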
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Dict = "t5" A__ : List[str] = ["past_key_values"] A__ : Optional[int] = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self: List[str] ,lowerCamelCase_: Dict=32128 ,lowerCamelCase_: Any=512 ,lowerCamelCase_: Union[str, Any]=64 ,lowerCamelCase_: str=2048 ,lowerCamelCase_: Optional[int]=6 ,lowerCamelCase_: List[str]=None ,lowerCamelCase_: int=8 ,lowerCamelCase_: str=32 ,lowerCamelCase_: Optional[int]=128 ,lowerCamelCase_: Tuple=0.1 ,lowerCamelCase_: List[Any]=1e-6 ,lowerCamelCase_: Dict=1.0 ,lowerCamelCase_: Tuple="relu" ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: str=1 ,**lowerCamelCase_: Tuple ,) -> List[str]: UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Dict = d_model UpperCAmelCase_ : Optional[int] = d_kv UpperCAmelCase_ : Dict = d_ff UpperCAmelCase_ : Dict = num_layers UpperCAmelCase_ : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase_ : Union[str, Any] = num_heads UpperCAmelCase_ : List[Any] = relative_attention_num_buckets UpperCAmelCase_ : List[Any] = relative_attention_max_distance UpperCAmelCase_ : Optional[Any] = dropout_rate UpperCAmelCase_ : Any = layer_norm_epsilon UpperCAmelCase_ : Optional[Any] = initializer_factor UpperCAmelCase_ : int = feed_forward_proj UpperCAmelCase_ : Optional[Any] = use_cache UpperCAmelCase_ : str = self.feed_forward_proj.split("""-""" ) UpperCAmelCase_ : int = act_info[-1] UpperCAmelCase_ : Tuple = act_info[0] == """gated""" if len(lowerCamelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase_ ) > 2: raise ValueError( F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
""" """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": UpperCAmelCase_ : List[str] = """gelu_new""" super().__init__( pad_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,is_encoder_decoder=lowerCamelCase_ ,**lowerCamelCase_ ,) class _snake_case ( __snake_case ): '''simple docstring''' @property def A__ ( self: str ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase_ : int = { """input_ids""": {0: """batch""", 1: """encoder_sequence"""}, """attention_mask""": {0: """batch""", 1: """encoder_sequence"""}, } if self.use_past: UpperCAmelCase_ : Any = """past_encoder_sequence + sequence""" UpperCAmelCase_ : Optional[int] = {0: """batch"""} UpperCAmelCase_ : List[str] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: UpperCAmelCase_ : Tuple = {0: """batch""", 1: """decoder_sequence"""} UpperCAmelCase_ : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(lowerCamelCase_ ,direction="""inputs""" ) return common_inputs @property def A__ ( self: Optional[Any] ) -> int: return 13
345
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
345
1
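The shuffle in the row above swaps two independently chosen indices per pass, which does not sample permutations uniformly; a sketch of the textbook Fisher-Yates loop, which does:

import random


def fisher_yates(data: list) -> list:
    # Walk from the last slot down, swapping it with a uniformly chosen
    # index at or below it; every permutation is then equally likely.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


print(fisher_yates([0, 1, 2, 3, 4, 5, 6, 7]))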
def actual_power(a: int, b: int):
    """Divide-and-conquer calculation of a**b for integer a and b.

    int(b / 2) truncates toward zero, so the recursion also terminates
    when power() passes a negative exponent through.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
345
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = True A__ : bool = False A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> List[str]: UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[int] = [] for i in range(self.num_layers ): UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD( in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : int = resnets UpperCAmelCase_ : Tuple = attentions if self.add_downsample: UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int: UpperCAmelCase_ : List[Any] = () for resnet, attn in zip(self.resnets ,self.attentions ): UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : bool = True A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> int: UpperCAmelCase_ : List[str] = [] for i in range(self.num_layers ): UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels UpperCAmelCase_ : Dict = FlaxResnetBlockaD( in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = resnets if self.add_downsample: UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any: UpperCAmelCase_ : Union[str, Any] = () for resnet in self.resnets: UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = True A__ : bool = False A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: str ) -> Any: UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : List[str] = [] for i in 
range(self.num_layers ): UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : int = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = resnets UpperCAmelCase_ : Dict = attentions if self.add_upsample: UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]: for resnet, attn in zip(self.resnets ,self.attentions ): # pop res hidden states UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1] UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1] UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) if self.add_upsample: UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ ) return hidden_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : bool = True A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> Dict: UpperCAmelCase_ : Any = [] for i in range(self.num_layers ): UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase_ : Any = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : str = resnets if self.add_upsample: UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]: for resnet in self.resnets: # pop res hidden states UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1] UpperCAmelCase_ : str = res_hidden_states_tuple[:-1] UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) if self.add_upsample: UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ ) return hidden_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> List[str]: # there is always at least one resnet UpperCAmelCase_ : List[Any] = [ FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels 
,dropout_prob=self.dropout ,dtype=self.dtype ,) ] UpperCAmelCase_ : Any = [] for _ in range(self.num_layers ): UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel( in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : Any = FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Dict = resnets UpperCAmelCase_ : Any = attentions def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ ) for attn, resnet in zip(self.attentions ,self.resnets[1:] ): UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) return hidden_states
345
1
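The divide-and-conquer power routine earlier in this row calls actual_power twice on the same half exponent, so it performs O(b) multiplications; computing the half power once and squaring it restores O(log b) behaviour. A sketch:

def fast_power(base: int, exp: int) -> float:
    if exp < 0:
        return 1 / fast_power(base, -exp)
    if exp == 0:
        return 1
    half = fast_power(base, exp // 2)  # compute the half power once, then square it
    return half * half * (base if exp % 2 else 1)


print(fast_power(-2, -3))  # -0.125, matching power(-2, -3) above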
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
345
import pickle import numpy as np from matplotlib import pyplot as plt class _snake_case : '''simple docstring''' def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]: UpperCAmelCase_ : List[Any] = bp_numa UpperCAmelCase_ : str = bp_numa UpperCAmelCase_ : List[Any] = bp_numa UpperCAmelCase_ : Optional[int] = conva_get[:2] UpperCAmelCase_ : List[Any] = conva_get[2] UpperCAmelCase_ : str = size_pa UpperCAmelCase_ : Optional[int] = rate_w UpperCAmelCase_ : Dict = rate_t UpperCAmelCase_ : List[Any] = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1 UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1 UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple: # save model dict with pickle UpperCAmelCase_ : Dict = { """num_bp1""": self.num_bpa, """num_bp2""": self.num_bpa, """num_bp3""": self.num_bpa, """conv1""": self.conva, """step_conv1""": self.step_conva, """size_pooling1""": self.size_poolinga, """rate_weight""": self.rate_weight, """rate_thre""": self.rate_thre, """w_conv1""": self.w_conva, """wkj""": self.wkj, """vji""": self.vji, """thre_conv1""": self.thre_conva, """thre_bp2""": self.thre_bpa, """thre_bp3""": self.thre_bpa, } with open(lowerCamelCase_ ,"""wb""" ) as f: pickle.dump(lowerCamelCase_ ,lowerCamelCase_ ) print(F'''Model saved: {save_path}''' ) @classmethod def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]: # read saved model with open(lowerCamelCase_ ,"""rb""" ) as f: UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301 UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" ) conv_get.append(model_dic.get("""step_conv1""" ) ) UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" ) UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" ) UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" ) UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" ) UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" ) UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" ) # create model instance UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # modify model parameter UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" ) UpperCAmelCase_ : int = model_dic.get("""wkj""" ) UpperCAmelCase_ : int = model_dic.get("""vji""" ) UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" ) UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" ) UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" ) return conv_ins def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple: return 1 / (1 + np.exp(-1 * x )) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]: return round(lowerCamelCase_ ,3 ) def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any: # convolution process UpperCAmelCase_ : Optional[Any] = convs[0] UpperCAmelCase_ : int = 
convs[1]
        UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
        # get the data slice of original image data, data_focus
        UpperCAmelCase_ : Dict = []
        for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
            for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
                UpperCAmelCase_ : Union[str, Any] = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(lowerCamelCase_ )
        # calculate the feature map of every single kernel, and save it as a list of matrices
        UpperCAmelCase_ : Any = []
        UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(lowerCamelCase_ ):
            UpperCAmelCase_ : Optional[int] = []
            for i_focus in range(len(lowerCamelCase_ ) ):
                UpperCAmelCase_ : int = (
                    np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(lowerCamelCase_ ) )
            UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
                lowerCamelCase_ ,lowerCamelCase_ )
            data_featuremap.append(lowerCamelCase_ )

        # expand the data slice to one dimension
        UpperCAmelCase_ : Optional[Any] = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
        UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
        return focus_list, data_featuremap

    def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
        # pooling process
        UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
        UpperCAmelCase_ : Any = int(size_map / size_pooling )
        UpperCAmelCase_ : Optional[int] = []
        for i_map in range(len(lowerCamelCase_ ) ):
            UpperCAmelCase_ : Any = featuremaps[i_map]
            UpperCAmelCase_ : Tuple = []
            for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
                for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
                    UpperCAmelCase_ : str = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(lowerCamelCase_ ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(lowerCamelCase_ ) )
            UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
            featuremap_pooled.append(lowerCamelCase_ )
        return featuremap_pooled

    def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
        # expand three-dimensional data to a one-dimensional list
        UpperCAmelCase_ : List[Any] = []
        for i in range(len(lowerCamelCase_ ) ):
            UpperCAmelCase_ : Tuple = np.shape(data[i] )
            UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
            UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
            data_expanded.extend(lowerCamelCase_ )
        UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
        return data_expanded

    def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
        # expand a matrix to a one-dimensional list
        UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
        UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
        UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
        return data_expanded

    def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
        UpperCAmelCase_ : Any = []
        UpperCAmelCase_ : Tuple = 0
        for i_map in range(lowerCamelCase_ ):
            UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
            for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
                for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
                    UpperCAmelCase_ : Any = pd_pool[
                        i_pool
                    ]
                    UpperCAmelCase_ : List[str] = i_pool + 1
            UpperCAmelCase_ : Optional[Any] = np.multiply(
                lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
            pd_all.append(lowerCamelCase_ )
        return pd_all

    def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]:
        # model training
        print("""----------------------Start Training-------------------------""" )
        print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
        print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
        UpperCAmelCase_ : str = 0
        UpperCAmelCase_ : Tuple = []
        UpperCAmelCase_ : Any = 10000
        while rp < n_repeat and mse >= error_accuracy:
            UpperCAmelCase_ : List[str] = 0
            print(F'''-------------Learning Time {rp}--------------''' )
            for p in range(len(lowerCamelCase_ ) ):
                # print('------------Learning Image: %d--------------'%p)
                UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
                UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
                UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
                    lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
                UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
                UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
                UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
                UpperCAmelCase_ : Union[str, Any] = data_bp_input
                UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
                UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
                UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
                UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                UpperCAmelCase_ : List[str] = np.multiply(
                    (data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
                UpperCAmelCase_ : List[Any] = np.multiply(
                    np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
                UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
                UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
                UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
                UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
                    lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
                    UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
                    UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    UpperCAmelCase_ : str = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
                UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            UpperCAmelCase_ : int = rp + 1
            UpperCAmelCase_ : Any = error_count / patterns
            all_mse.append(lowerCamelCase_ )

        def draw_error():
            UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(lowerCamelCase_ ,"""+-""" )
            plt.plot(lowerCamelCase_ ,"""r--""" )
            plt.xlabel("""Learning Times""" )
            plt.ylabel("""All_mse""" )
            plt.grid(lowerCamelCase_ ,alpha=0.5 )
            plt.show()

        print("""------------------Training Completed---------------------""" )
        print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse

    def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
        # model predict
        UpperCAmelCase_ : Union[str, Any] = []
        print("""-------------------Start Testing-------------------------""" )
        print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
        for p in range(len(lowerCamelCase_ ) ):
            UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
            UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
                lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
            UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
            UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
            UpperCAmelCase_ : str = data_bp_input
            UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
            UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
            UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
            UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
            produce_out.extend(bp_outa.getA().tolist() )
        UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
        return np.asarray(lowerCamelCase_ )

    def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
        # return the data of image after the convolution process so we can check it out
        UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
        UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
            lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
        UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
        return data_conveda, data_pooleda


if __name__ == "__main__":
    pass
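# Editor's sketch (not part of the file above): the pooling method walks each feature
# map in non-overlapping ``size_pooling`` x ``size_pooling`` windows and averages them.
# A compact NumPy equivalent for one square map (toy values, an editor's assumption):
import numpy as np

feature_map = np.arange(16.0).reshape(4, 4)
size_pooling = 2
h = feature_map.shape[0] // size_pooling
# Reshape into (blocks, rows-in-block, blocks, cols-in-block) and average each block.
pooled = feature_map.reshape(h, size_pooling, h, size_pooling).mean(axis=(1, 3))
print(pooled)  # 2x2 matrix of window averages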
345
1
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : str = -1 UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : Optional[int] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: Dict ) -> Optional[Any]: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] ) UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() UpperCAmelCase_ : int = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[Any] ) -> Dict: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :] UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : List[str] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: str ) -> str: # Tests that we can pass 
`decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Any = -1 UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n" UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = -1 UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 ) UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(lowerCamelCase_ ): UpperCAmelCase_ : Union[str, Any] = """""" for new_text in streamer: streamer_text += new_text
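# Editor's sketch (not part of the file above): the typical non-test usage pattern of
# ``TextIteratorStreamer`` that these tests exercise -- generation runs in a background
# thread while the main thread consumes decoded text chunks. The checkpoint name is
# reused from the tests above; any real causal LM checkpoint would work the same way.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
thread.start()
for chunk in streamer:  # yields decoded text as tokens arrive
    print(chunk, end="")
thread.join()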
345
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class _snake_case ( __snake_case , unittest.TestCase ):
    '''simple docstring'''
    A__ : Optional[Any] = CTRLTokenizer
    A__ : Optional[Any] = False
    A__ : str = False

    def A__ ( self: Optional[int] ) -> List[Any]:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
        UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""}
        UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
        UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
        with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(lowerCamelCase_ ) )

    def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )

    def A__ ( self: int ,lowerCamelCase_: int ) -> str:
        UpperCAmelCase_ : List[str] = """adapt react readapt apt"""
        UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
        return input_text, output_text

    def A__ ( self: Union[str, Any] ) -> Optional[int]:
        UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
        UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
        UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
        UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
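# Editor's sketch (not part of the file above): the tiny vocab/merges written in
# ``setUp`` drive the BPE splitting, which is why "react" tokenizes to
# "re@@ a@@ c@@ t". Loading the real pretrained tokenizer works the same way
# (the hub id below is an assumption on the editor's part):
from transformers import CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("Salesforce/ctrl")
print(tokenizer.tokenize("adapt react"))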
345
1
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase_ = ''' Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") >>> pipe.to("cuda") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save("cat.png") ``` ''' def lowerCamelCase_ ( _a : List[Any] , _a : Tuple , _a : Any=8 ): '''simple docstring''' UpperCAmelCase_ : List[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase_ : Optional[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _snake_case ( __snake_case ): '''simple docstring''' def __init__( self: List[Any] ,lowerCamelCase_: UNetaDConditionModel ,lowerCamelCase_: DDPMScheduler ,lowerCamelCase_: VQModel ,) -> Dict: super().__init__() self.register_modules( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,movq=lowerCamelCase_ ,) UpperCAmelCase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1) def A__ ( self: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[Any] ) -> Optional[int]: if latents is None: UpperCAmelCase_ : int = randn_tensor(lowerCamelCase_ ,generator=lowerCamelCase_ ,device=lowerCamelCase_ ,dtype=lowerCamelCase_ ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) UpperCAmelCase_ : Tuple = latents.to(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = latents * scheduler.init_noise_sigma return latents def A__ ( self: Any ,lowerCamelCase_: List[Any]=0 ) -> Tuple: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) UpperCAmelCase_ : Optional[Any] = torch.device(F'''cuda:{gpu_id}''' ) UpperCAmelCase_ : List[str] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: Any ,lowerCamelCase_: Optional[int]=0 ) -> int: if is_accelerate_available() and is_accelerate_version(""">=""" ,"""0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) UpperCAmelCase_ : List[str] = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" ,silence_dtype_warnings=lowerCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) 
UpperCAmelCase_ : int = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = cpu_offload_with_hook(lowerCamelCase_ ,lowerCamelCase_ ,prev_module_hook=lowerCamelCase_ ) # We'll offload the last model manually. UpperCAmelCase_ : Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def A__ ( self: Optional[int] ) -> Union[str, Any]: if not hasattr(self.unet ,"""_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase_ ,"""_hf_hook""" ) and hasattr(module._hf_hook ,"""execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase_ ) def __call__( self: Dict ,lowerCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] ,lowerCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] ,lowerCamelCase_: int = 512 ,lowerCamelCase_: int = 512 ,lowerCamelCase_: int = 100 ,lowerCamelCase_: float = 4.0 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase_: Optional[torch.FloatTensor] = None ,lowerCamelCase_: Optional[str] = "pil" ,lowerCamelCase_: bool = True ,) -> Optional[Any]: UpperCAmelCase_ : Any = self._execution_device UpperCAmelCase_ : List[str] = guidance_scale > 1.0 if isinstance(lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : List[Any] = torch.cat(lowerCamelCase_ ,dim=0 ) UpperCAmelCase_ : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt if isinstance(lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : Dict = torch.cat(lowerCamelCase_ ,dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ : Union[str, Any] = image_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 ) UpperCAmelCase_ : Any = negative_image_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 ) UpperCAmelCase_ : List[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=lowerCamelCase_ ) self.scheduler.set_timesteps(lowerCamelCase_ ,device=lowerCamelCase_ ) UpperCAmelCase_ : str = self.scheduler.timesteps UpperCAmelCase_ : List[str] = self.unet.config.in_channels UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowerCamelCase_ ,lowerCamelCase_ ,self.movq_scale_factor ) # create initial latent UpperCAmelCase_ : Optional[int] = self.prepare_latents( (batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,self.scheduler ,) for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ : Union[str, Any] = {"""image_embeds""": image_embeds} UpperCAmelCase_ : Union[str, Any] = self.unet( sample=lowerCamelCase_ ,timestep=lowerCamelCase_ ,encoder_hidden_states=lowerCamelCase_ ,added_cond_kwargs=lowerCamelCase_ ,return_dict=lowerCamelCase_ ,)[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = noise_pred.split(latents.shape[1] ,dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = variance_pred.chunk(2 ) UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ 
: List[str] = torch.cat([noise_pred, variance_pred_text] ,dim=1 ) if not ( hasattr(self.scheduler.config ,"""variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] ,dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : Any = self.scheduler.step( lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ,)[0] # post-processing UpperCAmelCase_ : Optional[int] = self.movq.decode(lowerCamelCase_ ,force_not_quantize=lowerCamelCase_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: UpperCAmelCase_ : Optional[int] = image * 0.5 + 0.5 UpperCAmelCase_ : Dict = image.clamp(0 ,1 ) UpperCAmelCase_ : int = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ : List[str] = self.numpy_to_pil(lowerCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase_ )
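# Editor's sketch (not part of the file above): the classifier-free guidance step in
# ``__call__`` runs the unconditional and text-conditioned branches in a single batch,
# splits the prediction, and combines the two halves. The arithmetic in isolation
# (toy tensors; the shapes and scale are illustrative assumptions):
import torch

noise_pred_uncond = torch.zeros(1, 4, 64, 64)
noise_pred_text = torch.ones(1, 4, 64, 64)
guidance_scale = 4.0
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# guidance_scale > 1 pushes the prediction toward the text-conditioned branch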
345
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


UpperCamelCase_ = {
    '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
    '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}


class _snake_case ( __snake_case ):
    '''simple docstring'''
    A__ : Union[str, Any] = "ernie_m"
    A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]:
        super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
        UpperCAmelCase_ : Optional[Any] = vocab_size
        UpperCAmelCase_ : Any = hidden_size
        UpperCAmelCase_ : Optional[Any] = num_hidden_layers
        UpperCAmelCase_ : Union[str, Any] = num_attention_heads
        UpperCAmelCase_ : List[Any] = intermediate_size
        UpperCAmelCase_ : List[Any] = hidden_act
        UpperCAmelCase_ : Any = hidden_dropout_prob
        UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
        UpperCAmelCase_ : str = max_position_embeddings
        UpperCAmelCase_ : Union[str, Any] = initializer_range
        UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
        UpperCAmelCase_ : List[Any] = classifier_dropout
        UpperCAmelCase_ : str = is_decoder
        UpperCAmelCase_ : List[str] = act_dropout
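# Editor's sketch (not part of the file above): like any ``PretrainedConfig`` subclass,
# this config can be instantiated with keyword overrides and round-tripped to JSON.
# ``ErnieMConfig`` is the un-mangled name this class appears to correspond to (an
# assumption on the editor's part):
from transformers import ErnieMConfig

config = ErnieMConfig(hidden_size=384, num_hidden_layers=6)
print(config.model_type)        # "ernie_m"
print(config.to_json_string())  # serializable for save_pretrained / from_pretrained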
345
1
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def lowerCamelCase_ ( _a : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(_a , _a )


def lowerCamelCase_ ( _a : Any ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = emb.weight.shape
    UpperCAmelCase_ : Tuple = nn.Linear(_a , _a , bias=_a )
    UpperCAmelCase_ : List[Any] = emb.weight.data
    return lin_layer


def lowerCamelCase_ ( _a : Dict ):
    '''simple docstring'''
    UpperCAmelCase_ : int = torch.load(_a , map_location="""cpu""" )
    UpperCAmelCase_ : Dict = Namespace(**checkpoint["""cfg"""]["""model"""] )
    UpperCAmelCase_ : Optional[int] = checkpoint["""model"""]
    remove_ignore_keys_(_a )
    UpperCAmelCase_ : str = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    UpperCAmelCase_ : List[str] = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
    UpperCAmelCase_ : int = XGLMConfig(
        vocab_size=_a , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    UpperCAmelCase_ : List[str] = XGLMForCausalLM(_a )
    UpperCAmelCase_ : Tuple = model.load_state_dict(_a , strict=_a )
    print(_a )
    UpperCAmelCase_ : Optional[Any] = make_linear_from_emb(model.model.embed_tokens )
    return model


if __name__ == "__main__":
    UpperCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    UpperCamelCase_ = parser.parse_args()
    UpperCamelCase_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
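# Editor's sketch (not part of the file above): the ``make_linear_from_emb`` helper ties
# the LM head to the token embedding by copying the embedding weights into a bias-free
# Linear layer. A stand-alone version with toy sizes (the sizes are assumptions):
import torch
from torch import nn

emb = nn.Embedding(100, 16)            # (vocab_size, d_model)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # share the same parameters

logits = lm_head(torch.randn(1, 16))
assert logits.shape == (1, 100)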
345
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : int=" " ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = text.split(_a ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )] def lowerCamelCase_ ( _a : dict ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(_a ): titles.append(title if title is not None else """""" ) texts.append(_a ) return {"title": titles, "text": texts} def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ): '''simple docstring''' UpperCAmelCase_ : List[str] = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""] UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ): '''simple docstring''' logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way UpperCAmelCase_ : Optional[int] = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc ) # And compute the embeddings UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a ) UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) UpperCAmelCase_ : Any = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space UpperCAmelCase_ : List[str] = dataset.map( partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , ) # And finally save your dataset UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" ) dataset.save_to_disk(_a ) # from datasets import load_from_disk # 
dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=_a ) # And save the index UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(_a ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class _snake_case : '''simple docstring''' A__ : str = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) A__ : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) A__ : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) A__ : Optional[str] = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class _snake_case : '''simple docstring''' A__ : Optional[int] = field( default=__snake_case , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) A__ : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class _snake_case : '''simple docstring''' A__ : int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) A__ : int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
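# Editor's sketch (not part of the file above): a minimal stand-alone version of the
# indexing step -- build a FAISS HNSW index over random vectors and query it. The
# dimensions and parameters below are toy assumptions, not the script's defaults:
import faiss
import numpy as np

d, m = 64, 32
xb = np.random.rand(1000, d).astype("float32")
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
index.add(xb)                           # index the passage embeddings
scores, ids = index.search(xb[:1], 5)   # retrieve the 5 nearest neighbours
print(ids)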
345
1
import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCamelCase_ ( _a : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = SwinConfig() UpperCAmelCase_ : Union[str, Any] = swin_name.split("""_""" ) UpperCAmelCase_ : Optional[int] = name_split[1] UpperCAmelCase_ : Union[str, Any] = int(name_split[4] ) UpperCAmelCase_ : str = int(name_split[3][-1] ) if model_size == "tiny": UpperCAmelCase_ : Dict = 96 UpperCAmelCase_ : Tuple = (2, 2, 6, 2) UpperCAmelCase_ : Any = (3, 6, 12, 24) elif model_size == "small": UpperCAmelCase_ : List[Any] = 96 UpperCAmelCase_ : int = (2, 2, 18, 2) UpperCAmelCase_ : Optional[Any] = (3, 6, 12, 24) elif model_size == "base": UpperCAmelCase_ : Optional[Any] = 128 UpperCAmelCase_ : Any = (2, 2, 18, 2) UpperCAmelCase_ : Any = (4, 8, 16, 32) else: UpperCAmelCase_ : Dict = 192 UpperCAmelCase_ : List[str] = (2, 2, 18, 2) UpperCAmelCase_ : Dict = (6, 12, 24, 48) if "in22k" in swin_name: UpperCAmelCase_ : Dict = 2_1841 else: UpperCAmelCase_ : Optional[int] = 1000 UpperCAmelCase_ : List[str] = """huggingface/label-files""" UpperCAmelCase_ : Dict = """imagenet-1k-id2label.json""" UpperCAmelCase_ : Union[str, Any] = json.load(open(hf_hub_download(_a , _a , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase_ : List[Any] = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase_ : List[str] = idalabel UpperCAmelCase_ : Tuple = {v: k for k, v in idalabel.items()} UpperCAmelCase_ : List[Any] = img_size UpperCAmelCase_ : Tuple = num_classes UpperCAmelCase_ : Optional[int] = embed_dim UpperCAmelCase_ : Optional[int] = depths UpperCAmelCase_ : List[str] = num_heads UpperCAmelCase_ : Optional[Any] = window_size return config def lowerCamelCase_ ( _a : Optional[Any] ): '''simple docstring''' if "patch_embed.proj" in name: UpperCAmelCase_ : List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: UpperCAmelCase_ : Dict = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: UpperCAmelCase_ : List[str] = """encoder.""" + name if "attn.proj" in name: UpperCAmelCase_ : Optional[int] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: UpperCAmelCase_ : Dict = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: UpperCAmelCase_ : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: UpperCAmelCase_ : Any = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": UpperCAmelCase_ : List[Any] = """layernorm.weight""" if name == "norm.bias": UpperCAmelCase_ : int = """layernorm.bias""" if "head" in name: UpperCAmelCase_ : Any = name.replace("""head""" , """classifier""" ) else: UpperCAmelCase_ : List[str] = """swin.""" + name return name def lowerCamelCase_ ( _a : Tuple , _a : Any ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase_ : Optional[Any] = orig_state_dict.pop(_a ) if "mask" in key: continue elif "qkv" in key: UpperCAmelCase_ : Optional[Any] = key.split(""".""" ) UpperCAmelCase_ : Tuple = int(key_split[1] ) UpperCAmelCase_ : Optional[int] = 
int(key_split[3] ) UpperCAmelCase_ : Optional[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCAmelCase_ : int = val[:dim, :] UpperCAmelCase_ : Tuple = val[ dim : dim * 2, : ] UpperCAmelCase_ : int = val[-dim:, :] else: UpperCAmelCase_ : List[str] = val[ :dim ] UpperCAmelCase_ : str = val[ dim : dim * 2 ] UpperCAmelCase_ : Any = val[ -dim: ] else: UpperCAmelCase_ : int = val return orig_state_dict def lowerCamelCase_ ( _a : Tuple , _a : List[str] ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = timm.create_model(_a , pretrained=_a ) timm_model.eval() UpperCAmelCase_ : Optional[int] = get_swin_config(_a ) UpperCAmelCase_ : Any = SwinForImageClassification(_a ) model.eval() UpperCAmelCase_ : List[Any] = convert_state_dict(timm_model.state_dict() , _a ) model.load_state_dict(_a ) UpperCAmelCase_ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase_ : int = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) UpperCAmelCase_ : Optional[Any] = Image.open(requests.get(_a , stream=_a ).raw ) UpperCAmelCase_ : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ) UpperCAmelCase_ : int = timm_model(inputs["""pixel_values"""] ) UpperCAmelCase_ : int = model(**_a ).logits assert torch.allclose(_a , _a , atol=1E-3 ) print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_a ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_a ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swin_name''', default='''swin_tiny_patch4_window7_224''', type=str, help='''Name of the Swin timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) UpperCamelCase_ = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
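# Editor's sketch (not part of the file above): the conversion splits timm's fused
# ``qkv`` projection into the separate query/key/value tensors that HF checkpoints
# expect. The slicing logic in isolation (the ``dim`` value is a toy assumption):
import torch

dim = 96
qkv_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] projection
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)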
345
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Dict = AutoencoderKL A__ : Optional[int] = "sample" A__ : Tuple = 1E-2 @property def A__ ( self: List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = 4 UpperCAmelCase_ : str = 3 UpperCAmelCase_ : Any = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ ) return {"sample": image} @property def A__ ( self: List[str] ) -> Tuple: return (3, 32, 32) @property def A__ ( self: Optional[Any] ) -> Any: return (3, 32, 32) def A__ ( self: Any ) -> Tuple: UpperCAmelCase_ : List[Any] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } UpperCAmelCase_ : int = self.dummy_input return init_dict, inputs_dict def A__ ( self: Optional[Any] ) -> int: pass def A__ ( self: str ) -> Any: pass @unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" ) def A__ ( self: Union[str, Any] ) -> Dict: # enable deterministic behavior for gradient checkpointing UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ ) model.to(lowerCamelCase_ ) assert not model.is_gradient_checkpointing and model.training UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowerCamelCase_ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() UpperCAmelCase_ : Dict = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) UpperCAmelCase_ : Dict = dict(model.named_parameters() ) UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) ) def A__ ( self: Optional[Any] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 ) model.to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def A__ ( self: Optional[int] ) -> int: UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ) UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ ) model.eval() if torch_device == "mps": UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) else: UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : str = torch.randn( 1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,) UpperCAmelCase_ : int = image.to(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": UpperCAmelCase_ : Tuple = torch.tensor( [ -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03, ] ) elif torch_device == "cpu": UpperCAmelCase_ : List[str] = torch.tensor( [-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] ) else: UpperCAmelCase_ : List[str] = torch.tensor( [-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] ) self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) ) @slow class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]: return F'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_ ) for s in shape] )}.npy''' def A__ ( self: Union[str, Any] ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]: UpperCAmelCase_ : Tuple = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ ) return image def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any: UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None UpperCAmelCase_ : str = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ : int = AutoencoderKL.from_pretrained( lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=lowerCamelCase_ ,revision=lowerCamelCase_ ,) model.to(lowerCamelCase_ ).eval() return model def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]: if torch_device == "mps": return torch.manual_seed(lowerCamelCase_ ) return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple: UpperCAmelCase_ : List[Any] = self.get_sd_vae_model() UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]], [47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]], # fmt: on ] ) 
@require_torch_gpu def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple: UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict: UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model() UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]], [37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]], # fmt: on ] ) @require_torch_gpu def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.get_sd_vae_model() UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu() UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]], [16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]], # fmt: on ] ) @require_torch_gpu def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 ) @parameterized.expand([(13,), 
(16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" ) def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int: UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" ) def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.get_sd_vae_model() UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]], [47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]], # fmt: on ] ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : Dict = self.get_sd_vae_model() UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ ) UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu() UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2 assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
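# Hedged stand-alone sketch of what the slow tests above exercise: an
# encode/decode round trip through the Stable Diffusion VAE. The model id and
# subfolder mirror the test helper above; running this downloads weights.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
with torch.no_grad():
    latents = vae.encode(torch.randn(1, 3, 512, 512)).latent_dist.sample()  # (1, 4, 64, 64): 8x downsampled
    reconstruction = vae.decode(latents).sample  # back to (1, 3, 512, 512)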
345
1
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
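# Hedged usage sketch for the feature types re-exported above (this mirrors
# the public `datasets` API; the column names and shapes are illustrative):
from datasets import Array2D, ClassLabel, Features, Value

features = Features(
    {
        "text": Value("string"),
        "label": ClassLabel(names=["neg", "pos"]),
        "embedding": Array2D(shape=(2, 4), dtype="float32"),
    }
)
print(features["label"].int2str(1))  # "pos"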
345
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
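# Hedged usage sketch: aligning the task template above with a concrete
# feature schema (the sampling rate is an illustrative value).
from datasets import Audio, Features, Value

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition()
aligned = template.align_with_features(features)
print(aligned.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}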
345
1
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in O(n).

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]

    # we will store the start and end of the previous furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring centred at i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_input_string find the corresponding palindrome
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is,
        # r)? if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
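# Hedged cross-check sketch: a brute-force reference implementation to
# validate the Manacher routine above on short random strings.
import random


def longest_palindrome_bruteforce(s: str) -> str:
    # Try every substring longer than the current best and keep palindromes.
    best = s[:1]
    for i in range(len(s)):
        for j in range(i + len(best) + 1, len(s) + 1):
            if s[i:j] == s[i:j][::-1]:
                best = s[i:j]
    return best


if __name__ == "__main__":
    for _ in range(100):
        s = "".join(random.choices("ab", k=10))
        assert len(palindromic_string(s)) == len(longest_palindrome_bruteforce(s))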
345
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = "layoutlmv3" def __init__( self: str ,lowerCamelCase_: Any=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Tuple=512 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: int=1 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: Tuple=64 ,lowerCamelCase_: Tuple=256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=224 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Dict=None ,**lowerCamelCase_: str ,) -> List[Any]: super().__init__( vocab_size=lowerCamelCase_ ,hidden_size=lowerCamelCase_ ,num_hidden_layers=lowerCamelCase_ ,num_attention_heads=lowerCamelCase_ ,intermediate_size=lowerCamelCase_ ,hidden_act=lowerCamelCase_ ,hidden_dropout_prob=lowerCamelCase_ ,attention_probs_dropout_prob=lowerCamelCase_ ,max_position_embeddings=lowerCamelCase_ ,type_vocab_size=lowerCamelCase_ ,initializer_range=lowerCamelCase_ ,layer_norm_eps=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,) UpperCAmelCase_ : List[Any] = max_ad_position_embeddings UpperCAmelCase_ : Optional[int] = coordinate_size UpperCAmelCase_ : Optional[int] = shape_size UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias UpperCAmelCase_ : Optional[int] = rel_pos_bins UpperCAmelCase_ : Union[str, Any] = max_rel_pos UpperCAmelCase_ : Dict = has_spatial_attention_bias UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins UpperCAmelCase_ : Tuple = max_rel_ad_pos UpperCAmelCase_ : Union[str, Any] = text_embed UpperCAmelCase_ : Optional[Any] = visual_embed UpperCAmelCase_ : List[str] = input_size UpperCAmelCase_ : str = num_channels UpperCAmelCase_ : Optional[int] = patch_size UpperCAmelCase_ : Tuple = classifier_dropout class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = version.parse("1.12" ) @property def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", 
{0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def A__ ( self: Any ) -> float: return 1e-5 @property def A__ ( self: int ) -> int: return 12 def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]: setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ : List[str] = compute_effective_axis_dimension( lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ ) UpperCAmelCase_ : int = compute_effective_axis_dimension( lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = dict( processor( lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) ) return inputs
345
1
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
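# Hedged check for the functions above: the four non-trivial two-digit
# digit-cancelling fractions multiply to 1/100, so solution() returns 100
# (the Project Euler 33 answer).
if __name__ == "__main__":
    assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
    assert solution() == 100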
345
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
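# Hedged invocation sketch for the converter above (the script filename and
# both paths are placeholders):
#   python convert_xglm_checkpoint.py /path/to/model.pt /path/to/dump_dir
# The dump can then be reloaded with:
#   from transformers import XGLMForCausalLM
#   model = XGLMForCausalLM.from_pretrained("/path/to/dump_dir")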
345
1
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = '''▁''' UpperCamelCase_ = {'''vocab_file''': '''prophetnet.tokenizer'''} UpperCamelCase_ = { '''vocab_file''': { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer''' ), } } UpperCamelCase_ = { '''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False}, } UpperCamelCase_ = { '''microsoft/xprophetnet-large-wiki100-cased''': 512, } def lowerCamelCase_ ( _a : Optional[int] ): '''simple docstring''' UpperCAmelCase_ : Tuple = collections.OrderedDict() with open(_a , """r""" , encoding="""utf-8""" ) as reader: UpperCAmelCase_ : Dict = reader.readlines() for index, token in enumerate(_a ): UpperCAmelCase_ : str = token.rstrip("""\n""" ) UpperCAmelCase_ : int = index return vocab class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = VOCAB_FILES_NAMES A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP A__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self: Union[str, Any] ,lowerCamelCase_: Dict ,lowerCamelCase_: Union[str, Any]="[SEP]" ,lowerCamelCase_: Any="[SEP]" ,lowerCamelCase_: Any="[SEP]" ,lowerCamelCase_: List[Any]="[UNK]" ,lowerCamelCase_: Union[str, Any]="[PAD]" ,lowerCamelCase_: str="[CLS]" ,lowerCamelCase_: Optional[Any]="[MASK]" ,lowerCamelCase_: Optional[Dict[str, Any]] = None ,**lowerCamelCase_: List[Any] ,) -> None: UpperCAmelCase_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,sp_model_kwargs=self.sp_model_kwargs ,**lowerCamelCase_ ,) try: import sentencepiece as spm except ImportError: logger.warning( """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece""" """ pip install sentencepiece""" ) raise UpperCAmelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase_ ) ) UpperCAmelCase_ : Tuple = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab UpperCAmelCase_ : str = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4} for i in range(10 ): UpperCAmelCase_ : Union[str, Any] = F'''[unused{i}]''' UpperCAmelCase_ : Optional[int] = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab UpperCAmelCase_ : List[str] = 12 UpperCAmelCase_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(lowerCamelCase_ ) def __getstate__( self: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : str = self.__dict__.copy() UpperCAmelCase_ : Union[str, Any] = None return state def __setstate__( self: int ,lowerCamelCase_: Optional[int] ) -> Any: UpperCAmelCase_ : List[str] = d try: import sentencepiece as spm except ImportError: logger.warning( """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece""" """ pip install sentencepiece""" ) raise # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): UpperCAmelCase_ : Any = {} UpperCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A__ ( self: List[str] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return ([0] * len(lowerCamelCase_ )) + [1] return ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1] def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Tuple = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def A__ ( self: Union[str, Any] ) -> Optional[int]: return len(self.sp_model ) + self.fairseq_offset def A__ ( self: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A__ ( self: Optional[Any] ,lowerCamelCase_: str ) -> str: return self.sp_model.encode(lowerCamelCase_ ,out_type=lowerCamelCase_ ) def A__ ( self: List[Any] ,lowerCamelCase_: Dict ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCAmelCase_ : int = self.sp_model.PieceToId(lowerCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A__ ( self: List[str] ,lowerCamelCase_: Dict ) -> Union[str, Any]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def A__ ( self: Tuple ,lowerCamelCase_: int ) -> int: UpperCAmelCase_ : Union[str, Any] = """""".join(lowerCamelCase_ ).replace(lowerCamelCase_ ,""" """ ).strip() return out_string def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : Optional[int] = os.path.join( 
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ ,"""wb""" ) as fi: UpperCAmelCase_ : List[Any] = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,) def A__ ( self: Optional[int] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.sep_token_id] UpperCAmelCase_ : Optional[Any] = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
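# Hedged usage sketch for the tokenizer above (upstream exposes it as
# XLMProphetNetTokenizer; the checkpoint name comes from the vocab map in
# this file):
# from transformers import XLMProphetNetTokenizer
#
# tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
# enc = tokenizer("Hello world", return_tensors="pt")
# print(tokenizer.convert_ids_to_tokens(enc.input_ids[0]))  # single sequences end with [SEP]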
345
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : '''simple docstring''' def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str: UpperCAmelCase_ : List[Any] = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : str = patch_size UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : Dict = embed_dim UpperCAmelCase_ : Dict = hidden_sizes UpperCAmelCase_ : str = depths UpperCAmelCase_ : int = num_heads UpperCAmelCase_ : List[Any] = window_size UpperCAmelCase_ : Union[str, Any] = mlp_ratio UpperCAmelCase_ : int = qkv_bias UpperCAmelCase_ : List[str] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Optional[int] = drop_path_rate UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : List[Any] = use_absolute_embeddings UpperCAmelCase_ : List[Any] = patch_norm UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Optional[Any] = scope UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Optional[int] = encoder_stride UpperCAmelCase_ : Optional[int] = out_features UpperCAmelCase_ : Optional[int] = out_indices def A__ ( self: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : int = None if self.use_labels: UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Any = self.get_config() return config, pixel_values, labels def A__ ( self: List[Any] ) -> Tuple: return FocalNetConfig( image_size=self.image_size ,patch_size=self.patch_size 
,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]: UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ) UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]: UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] ) # verify backbone works with out_features=None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : int = 1 UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int: UpperCAmelCase_ : List[Any] = self.type_sequence_label_size UpperCAmelCase_ : int = 
FocalNetForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : List[str] = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def A__ ( self: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs UpperCAmelCase_ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[Any] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) A__ : Union[str, Any] = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) A__ : Optional[Any] = False A__ : Any = False A__ : List[str] = False A__ : Any = False A__ : Any = False def A__ ( self: List[str] ) -> Tuple: UpperCAmelCase_ : Dict = FocalNetModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ ) def A__ ( self: List[str] ) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self: List[str] ) -> Union[str, Any]: return def A__ ( self: str ) -> List[str]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def A__ ( self: Tuple ) -> int: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def A__ ( self: Dict ) -> List[str]: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def A__ ( self: int ) -> int: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def A__ ( self: int ) -> Dict: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def A__ ( self: Optional[Any] ) -> Optional[Any]: pass def A__ ( self: Optional[Any] ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase_ : List[Any] = model.get_output_embeddings() 
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) ) def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : str = model_class(lowerCamelCase_ ) UpperCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Any = [*signature.parameters.keys()] UpperCAmelCase_ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]: UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Any = outputs.hidden_states UpperCAmelCase_ : List[Any] = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # FocalNet has a different seq_length UpperCAmelCase_ : int = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape UpperCAmelCase_ : List[Any] = ( reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def A__ ( self: Any ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : str = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Union[str, Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[str] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % 
patch_size[1]) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) @slow def A__ ( self: Optional[int] ) -> Optional[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def A__ ( self: Optional[int] ) -> str: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def A__ ( self: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = self.default_image_processor UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Dict = model(**lowerCamelCase_ ) # verify the logits UpperCAmelCase_ : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() ,281 ) @require_torch class _snake_case ( __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else () A__ : int = FocalNetConfig A__ : List[str] = False def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : str = FocalNetModelTester(self )
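# Hedged note: the suite above is meant to be driven by pytest, e.g.
#   RUN_SLOW=1 pytest tests/models/focalnet/test_modeling_focalnet.py
# (the path and the RUN_SLOW gate for @slow tests follow the usual
# Transformers test layout; both are assumptions here).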
345
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
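# Hedged sketch of the lazy-import behaviour set up above: importing the
# package stays cheap, and the heavy submodule import only happens when an
# attribute is first resolved.
# import transformers
#
# model_cls = transformers.DebertaModel  # triggers the modeling_deberta import
# model = model_cls.from_pretrained("microsoft/deberta-base")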
345
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : '''simple docstring''' def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]: UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : int = image_size UpperCAmelCase_ : Union[str, Any] = patch_size UpperCAmelCase_ : Optional[Any] = num_channels UpperCAmelCase_ : int = embed_dim UpperCAmelCase_ : Union[str, Any] = depths UpperCAmelCase_ : List[str] = num_heads UpperCAmelCase_ : int = window_size UpperCAmelCase_ : List[str] = mlp_ratio UpperCAmelCase_ : Tuple = qkv_bias UpperCAmelCase_ : Tuple = hidden_dropout_prob UpperCAmelCase_ : str = attention_probs_dropout_prob UpperCAmelCase_ : Tuple = drop_path_rate UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : int = use_absolute_embeddings UpperCAmelCase_ : Any = patch_norm UpperCAmelCase_ : Optional[int] = layer_norm_eps UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Dict = scope UpperCAmelCase_ : int = use_labels UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[str] = encoder_stride def A__ ( self: Any ) -> int: UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : List[Any] = None if self.use_labels: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : str = self.get_config() return config, pixel_values, labels def A__ ( self: List[Any] ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act 
,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str: UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int: UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : str = 1 UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : int = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int: UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def A__ ( self: str ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Tuple = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) A__ : Optional[Any] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) A__ : List[Any] = False A__ : Tuple = False A__ : int = False A__ : Union[str, Any] = False def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : Any = SwinvaModelTester(self ) UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ) def A__ ( self: Optional[int] ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self: Any ) -> Dict: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*lowerCamelCase_ ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def A__ ( self: int ) -> Dict: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def A__ ( self: Tuple ) -> List[str]: pass def A__ ( self: str ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase_ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ ) UpperCAmelCase_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : int = [*signature.parameters.keys()] UpperCAmelCase_ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Any = True for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Union[str, Any] = False UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Optional[Any] = outputs.attentions UpperCAmelCase_ : List[str] = len(self.model_tester.depths ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase_ : str = True UpperCAmelCase_ : Optional[Any] = config.window_size**2 UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : List[Any] = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ ) # Check attention is always last and order is fine UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : List[Any] = True UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) if hasattr(self.model_tester ,"""num_hidden_states_types""" ): UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states UpperCAmelCase_ : List[str] = 2 self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) ) UpperCAmelCase_ : Any = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, 
window_size_squared] ,) def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]: UpperCAmelCase_ : str = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : List[str] = outputs.hidden_states UpperCAmelCase_ : Optional[Any] = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # Swinv2 has a different seq_length UpperCAmelCase_ : Optional[Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape UpperCAmelCase_ : Optional[Any] = ( reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def A__ ( self: Any ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCAmelCase_ : Any = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : str = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Union[str, Any] = 3 UpperCAmelCase_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : List[str] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : List[str] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) def A__ ( self: Optional[int] ) -> str: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def A__ ( self: str ) -> Tuple: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__ ( self: Any ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def A__ ( self: Dict ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def A__ ( self: str ) -> List[Any]: UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( lowerCamelCase_ ) UpperCAmelCase_ : Any = self.default_image_processor UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ) # verify the logits UpperCAmelCase_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
345
1
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def lowerCamelCase_ ( _a : Optional[Any] ): '''simple docstring''' if "img_encoder.pos_embed" in name: UpperCAmelCase_ : Optional[int] = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: UpperCAmelCase_ : Optional[int] = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: UpperCAmelCase_ : List[str] = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: UpperCAmelCase_ : Any = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: UpperCAmelCase_ : Tuple = name.replace("""blocks""" , """layers""" ) if "attn" in name and "pre_assign" not in name: UpperCAmelCase_ : List[Any] = name.replace("""attn""" , """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: UpperCAmelCase_ : Optional[Any] = name.replace("""proj""" , """out_proj""" ) if "pre_assign_attn.attn.proj" in name: UpperCAmelCase_ : List[str] = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: UpperCAmelCase_ : Tuple = name.replace("""norm1""" , """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: UpperCAmelCase_ : Union[str, Any] = name.replace("""norm2""" , """layer_norm2""" ) if "img_encoder.norm" in name: UpperCAmelCase_ : Optional[Any] = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: UpperCAmelCase_ : Any = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: UpperCAmelCase_ : int = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: UpperCAmelCase_ : List[Any] = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" ) if "ln_1" in name: UpperCAmelCase_ : str = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: UpperCAmelCase_ : List[Any] = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: UpperCAmelCase_ : List[Any] = name.replace("""c_proj""" , """fc2""" ) if "text_encoder" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("""text_encoder""" , """text_model""" ) if "ln_final" in name: UpperCAmelCase_ : str = name.replace("""ln_final""" , """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: UpperCAmelCase_ : Optional[Any] = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" ) if "img_projector.linear_out." 
in name: UpperCAmelCase_ : Optional[int] = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: UpperCAmelCase_ : Tuple = name.replace("""text_projector.linear_hidden""" , """text_projection""" ) if "text_projector.linear_out" in name: UpperCAmelCase_ : str = name.replace("""text_projector.linear_out""" , """text_projection.3""" ) return name def lowerCamelCase_ ( _a : Optional[Any] , _a : Dict ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase_ : str = orig_state_dict.pop(_a ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors UpperCAmelCase_ : Tuple = key.split(""".""" ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = int(key_split[2] ), int(key_split[4] ) UpperCAmelCase_ : int = config.vision_config.hidden_size if "weight" in key: UpperCAmelCase_ : Tuple = val[:dim, :] UpperCAmelCase_ : Dict = val[dim : dim * 2, :] UpperCAmelCase_ : Optional[Any] = val[-dim:, :] else: UpperCAmelCase_ : Tuple = val[:dim] UpperCAmelCase_ : str = val[dim : dim * 2] UpperCAmelCase_ : Optional[Any] = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors UpperCAmelCase_ : List[Any] = key.split(""".""" ) UpperCAmelCase_ : int = int(key_split[3] ) UpperCAmelCase_ : int = config.text_config.hidden_size if "weight" in key: UpperCAmelCase_ : Dict = val[:dim, :] UpperCAmelCase_ : Optional[int] = val[ dim : dim * 2, : ] UpperCAmelCase_ : str = val[-dim:, :] else: UpperCAmelCase_ : Optional[Any] = val[:dim] UpperCAmelCase_ : Optional[int] = val[dim : dim * 2] UpperCAmelCase_ : int = val[-dim:] else: UpperCAmelCase_ : List[str] = rename_key(_a ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): UpperCAmelCase_ : Tuple = val.squeeze_() else: UpperCAmelCase_ : int = val return orig_state_dict def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase_ : Optional[Any] = Image.open(requests.get(_a , stream=_a ).raw ) return im @torch.no_grad() def lowerCamelCase_ ( _a : Optional[Any] , _a : str , _a : List[str]="groupvit-gcc-yfcc" , _a : Tuple=False ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = GroupViTConfig() UpperCAmelCase_ : Union[str, Any] = GroupViTModel(_a ).eval() UpperCAmelCase_ : str = torch.load(_a , map_location="""cpu""" )["""model"""] UpperCAmelCase_ : List[str] = convert_state_dict(_a , _a ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model.load_state_dict(_a , strict=_a ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_a ) == 0) # verify result UpperCAmelCase_ : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) UpperCAmelCase_ : Union[str, Any] = prepare_img() UpperCAmelCase_ : Optional[int] = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=_a , padding=_a , return_tensors="""pt""" ) with torch.no_grad(): UpperCAmelCase_ : int = model(**_a ) if model_name == "groupvit-gcc-yfcc": UpperCAmelCase_ : Optional[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] ) 
elif model_name == "groupvit-gcc-redcaps": UpperCAmelCase_ : str = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image , _a , atol=1E-3 ) processor.save_pretrained(_a ) model.save_pretrained(_a ) print("""Successfully saved processor and model to""" , _a ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(_a , organization="""nielsr""" ) model.push_to_hub(_a , organization="""nielsr""" ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''') parser.add_argument( '''--model_name''', default='''groupvit-gcc-yfcc''', type=str, help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''', ) UpperCamelCase_ = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
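# A minimal sketch of the fused-projection split performed by convert_state_dict
# above: a (3*dim, dim) qkv weight is sliced into separate query/key/value
# matrices. `dim` here is illustrative, not GroupViT's actual hidden size.
import torch
dim = 8
qkv_weight = torch.randn(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)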
345
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED UpperCamelCase_ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } UpperCamelCase_ = { '''allenai/led-base-16384''': 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : int = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCAmelCase_ : Dict = bs[:] UpperCAmelCase_ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(_a ) cs.append(2**8 + n ) n += 1 UpperCAmelCase_ : Any = [chr(_a ) for n in cs] return dict(zip(_a , _a ) ) def lowerCamelCase_ ( _a : List[str] ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = set() UpperCAmelCase_ : List[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase_ : Optional[int] = char return pairs class _snake_case ( __snake_case ): '''simple docstring''' A__ : str = VOCAB_FILES_NAMES A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Optional[int] = ["input_ids", "attention_mask"] def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any: UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token # Mask token behaves like a normal word, i.e.
include the space before it UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,) with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle: UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : Any = errors # how to handle errors in decoding UpperCAmelCase_ : int = bytes_to_unicode() UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle: UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1] UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges] UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) ) UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def A__ ( self: List[str] ) -> List[str]: return len(self.encoder ) def A__ ( self: Any ) -> Union[str, Any]: return dict(self.encoder ,**self.added_tokens_encoder ) def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]: if token in self.cache: return self.cache[token] UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : List[str] = 0 while i < len(lowerCamelCase_ ): try: UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase_ : Union[str, Any] = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ ) UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = word return word def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]: UpperCAmelCase_ : str = [] for token in re.findall(self.pat ,lowerCamelCase_ ): UpperCAmelCase_ : List[Any] = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]: return 
self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) ) def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]: return self.decoder.get(lowerCamelCase_ ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]: UpperCAmelCase_ : str = """""".join(lowerCamelCase_ ) UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : List[Any] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase_ : List[str] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCAmelCase_ : str = 0 with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) UpperCAmelCase_ : Tuple = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] UpperCAmelCase_ : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Optional[Any] = [self.sep_token_id] UpperCAmelCase_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCAmelCase_ : Dict = """ """ + text return (text, kwargs) def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: 
Optional[bool] = None ,) -> dict: UpperCAmelCase_ : Optional[int] = super()._pad( encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,) # Load from model defaults if return_attention_mask is None: UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCAmelCase_ : str = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
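# A toy illustration of the `_pad` override above: in `global_attention_mask`,
# 0 marks local attention and 1 marks global attention, so padded positions are
# filled with -1 ("not attended at all") on the configured padding side.
global_attention_mask = [1, 0, 0]
max_length = 6
difference = max_length - len(global_attention_mask)
padded_right = global_attention_mask + [-1] * difference  # padding_side == "right"
padded_left = [-1] * difference + global_attention_mask   # padding_side == "left"
print(padded_right)  # [1, 0, 0, -1, -1, -1]
print(padded_left)   # [-1, -1, -1, 1, 0, 0]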
345
1
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.17.0.dev0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''') UpperCamelCase_ = logging.getLogger(__name__) @dataclass class _snake_case : '''simple docstring''' A__ : Optional[str] = field( default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) A__ : Optional[str] = field( default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , ) A__ : int = field( default=1_024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ : bool = field( default=__snake_case , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) A__ : bool = field( default=__snake_case , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) A__ : Optional[int] = field( default=__snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) A__ : Optional[int] = field( default=__snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) A__ : Optional[int] = field( default=__snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "A csv or a json file containing the training data."} ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "A csv or a json file containing the validation data."} ) A__ : Optional[str] = field(default=__snake_case , metadata={"help": "A csv or a json file containing the test data."} ) def A__ ( self: List[str] ) -> List[Any]: if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: UpperCAmelCase_ : Optional[Any] = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." UpperCAmelCase_ : str = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class _snake_case : '''simple docstring''' A__ : str = field( default=__snake_case , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) A__ : bool = field( default=__snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) A__ : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) A__ : bool = field( default=__snake_case , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) UpperCAmelCase_ : int = training_args.get_process_log_level() logger.setLevel(_a ) datasets.utils.logging.set_verbosity(_a ) transformers.utils.logging.set_verbosity(_a ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. UpperCAmelCase_ : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase_ : str = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). 
# # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. UpperCAmelCase_ : List[str] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. UpperCAmelCase_ : Any = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: UpperCAmelCase_ : Tuple = data_args.train_file.split(""".""" )[-1] UpperCAmelCase_ : int = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." UpperCAmelCase_ : Dict = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(F'''load a local file for {key}: {data_files[key]}''' ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files UpperCAmelCase_ : Union[str, Any] = load_dataset("""csv""" , data_files=_a , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files UpperCAmelCase_ : List[str] = load_dataset("""json""" , data_files=_a , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels UpperCAmelCase_ : Any = raw_datasets["""train"""].features["""label"""].names UpperCAmelCase_ : Optional[Any] = len(_a ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab.
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer UpperCAmelCase_ : Optional[Any] = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_a , ) UpperCAmelCase_ : List[Any] = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: UpperCAmelCase_ : Union[str, Any] = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch UpperCAmelCase_ : int = False # Some models have set the order of the labels to use, so let's make sure we do use it. UpperCAmelCase_ : int = {"""Refused""": 0, """Entailed""": 1} UpperCAmelCase_ : List[Any] = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) UpperCAmelCase_ : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(_a : Optional[Any] ): # Tokenize the texts def _convert_table_text_to_pandas(_a : Optional[Any] ): UpperCAmelCase_ : int = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] UpperCAmelCase_ : Union[str, Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd UpperCAmelCase_ : List[str] = examples["""statement"""] UpperCAmelCase_ : int = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) UpperCAmelCase_ : List[Any] = tokenizer(_a , _a , padding=_a , max_length=_a , truncation=_a ) UpperCAmelCase_ : Union[str, Any] = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): UpperCAmelCase_ : List[Any] = raw_datasets.map( _a , batched=_a , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) UpperCAmelCase_ : str = raw_datasets["""train"""] if data_args.max_train_samples is not None: UpperCAmelCase_ : List[Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) UpperCAmelCase_ : str = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: UpperCAmelCase_ : Tuple = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise 
ValueError("""--do_predict requires a test dataset""" ) UpperCAmelCase_ : Optional[Any] = raw_datasets["""test"""] if data_args.max_predict_samples is not None: UpperCAmelCase_ : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_a ) ) , 3 ): logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_a : EvalPrediction ): UpperCAmelCase_ : Any = p.predictions[0] if isinstance(p.predictions , _a ) else p.predictions UpperCAmelCase_ : List[str] = np.argmax(_a , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: UpperCAmelCase_ : Union[str, Any] = default_data_collator elif training_args.fpaa: UpperCAmelCase_ : int = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) else: UpperCAmelCase_ : str = None # Initialize our Trainer UpperCAmelCase_ : List[Any] = Trainer( model=_a , args=_a , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_a , tokenizer=_a , data_collator=_a , ) # Training if training_args.do_train: UpperCAmelCase_ : List[str] = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase_ : Optional[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase_ : Optional[Any] = last_checkpoint UpperCAmelCase_ : Any = trainer.train(resume_from_checkpoint=_a ) UpperCAmelCase_ : str = train_result.metrics UpperCAmelCase_ : List[str] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_a ) ) UpperCAmelCase_ : str = min(_a , len(_a ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , _a ) trainer.save_metrics("""train""" , _a ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) UpperCAmelCase_ : Dict = trainer.evaluate(eval_dataset=_a ) UpperCAmelCase_ : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_a ) UpperCAmelCase_ : List[Any] = min(_a , len(_a ) ) trainer.log_metrics("""eval""" , _a ) trainer.save_metrics("""eval""" , _a ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
UpperCAmelCase_ : Dict = predict_dataset.remove_columns("""label""" ) UpperCAmelCase_ : Any = trainer.predict(_a , metric_key_prefix="""predict""" ).predictions UpperCAmelCase_ : Optional[int] = np.argmax(_a , axis=1 ) UpperCAmelCase_ : Union[str, Any] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(_a , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(_a ): UpperCAmelCase_ : List[Any] = label_list[item] writer.write(F'''{index}\t{item}\n''' ) UpperCAmelCase_ : Tuple = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**_a ) else: trainer.create_model_card(**_a ) def lowerCamelCase_ ( _a : Dict ): '''simple docstring''' main() if __name__ == "__main__": main()
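# A runnable sketch of the table parsing in `preprocess_tabfact_function` above:
# rows are newline-separated, cells are '#'-separated, and the first row becomes
# the header. The table contents here are made up for illustration.
import pandas as pd
table_text = "year#city\n2008#beijing\n2012#london\n"
content = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(content[1:], columns=content[0])
print(table)
#    year     city
# 0  2008  beijing
# 1  2012   london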
345
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Union[str, Any] ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Optional[Any] = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ ) return image @property def A__ ( self: List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : int = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) return model @property def A__ ( self: str ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) return model @property def A__ ( self: Optional[int] ) -> int: torch.manual_seed(0 ) UpperCAmelCase_ : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModel(lowerCamelCase_ ) @property def A__ ( self: Tuple ) -> Tuple: def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ): class _snake_case : '''simple docstring''' def __init__( self: List[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = torch.ones([0] ) def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int: self.pixel_values.to(lowerCamelCase_ ) return self return Out() return extract def A__ ( self: Union[str, Any] ) -> Tuple: UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : int = self.dummy_cond_unet UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : str = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : str = 
torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : List[Any] = output.images UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Dict = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : int = image[0, -3:, -3:, -1] UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.dummy_cond_unet UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: str ) -> Dict: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert isinstance(pipe.scheduler ,lowerCamelCase_ ) assert pipe.safety_checker is None UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ ) # sanity 
check that the pipeline still works assert pipe.safety_checker is None UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : Tuple = self.dummy_cond_unet UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 UpperCAmelCase_ : Optional[Any] = unet.half() UpperCAmelCase_ : Optional[int] = vae.half() UpperCAmelCase_ : int = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ) -> List[Any]: UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : str = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) UpperCAmelCase_ : Optional[int] = 4003660346 UpperCAmelCase_ : int = 7 # without safety guidance (sld_guidance_scale = 0) UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Optional[int] = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Any = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) 
UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity""" UpperCAmelCase_ : List[Any] = 2734971755 UpperCAmelCase_ : Optional[Any] = 7 UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.""" """ leyendecker""" ) UpperCAmelCase_ : Optional[Any] = 1044355234 UpperCAmelCase_ : List[str] = 12 UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Any = output.images UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : List[str] = output.images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
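# A hedged usage sketch of the safe pipeline exercised by the nightly tests
# above. The checkpoint id, prompt style and every sld_* value are copied from
# those test calls, not tuned recommendations; sld_guidance_scale=0 disables
# safe latent diffusion. Running this downloads the full model weights.
import torch
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe
pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
generator = torch.manual_seed(0)
image = pipe(
    ["A painting of a squirrel eating a burger"],
    generator=generator,
    guidance_scale=7,
    num_inference_steps=50,
    output_type="np",
    sld_guidance_scale=2000,
    sld_warmup_steps=7,
    sld_threshold=0.025,
    sld_momentum_scale=0.5,
    sld_mom_beta=0.7,
).images[0]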
345
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Tuple = "dpr" def __init__( self: List[str] ,lowerCamelCase_: Tuple=30522 ,lowerCamelCase_: Union[str, Any]=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Optional[Any]=12 ,lowerCamelCase_: Optional[Any]=3072 ,lowerCamelCase_: Optional[Any]="gelu" ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=512 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: int=0.0_2 ,lowerCamelCase_: List[Any]=1e-12 ,lowerCamelCase_: Tuple=0 ,lowerCamelCase_: Dict="absolute" ,lowerCamelCase_: int = 0 ,**lowerCamelCase_: List[str] ,) -> Optional[int]: super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ ) UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : List[str] = hidden_size UpperCAmelCase_ : List[Any] = num_hidden_layers UpperCAmelCase_ : Dict = num_attention_heads UpperCAmelCase_ : Tuple = hidden_act UpperCAmelCase_ : Optional[Any] = intermediate_size UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : str = type_vocab_size UpperCAmelCase_ : Any = initializer_range UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : Tuple = projection_dim UpperCAmelCase_ : str = position_embedding_type
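# A minimal instantiation sketch for the config class defined above, shown under
# its public transformers name DPRConfig (an assumption about this obfuscated
# listing). A non-zero projection_dim adds a projection over the pooled output.
from transformers import DPRConfig
config = DPRConfig(projection_dim=128)
print(config.model_type, config.hidden_size, config.projection_dim)  # dpr 768 128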
345
import unittest

from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MobileBertForMaskedLM,
        MobileBertForMultipleChoice,
        MobileBertForNextSentencePrediction,
        MobileBertForPreTraining,
        MobileBertForQuestionAnswering,
        MobileBertForSequenceClassification,
        MobileBertForTokenClassification,
        MobileBertModel,
    )


class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model: it also needs a next_sentence_label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
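The tolerance comment above describes comparing large-magnitude activations by ratio rather than by absolute difference. A minimal self-contained sketch of that check pattern (the tensor values below are made up for illustration; only the bound check itself comes from the test above):

import torch

TOLERANCE = 1e-3

# Hypothetical stand-ins for expected and actual model outputs spanning many orders of magnitude.
expected = torch.tensor([1.0e8, -2.5e7, 3.0e0])
actual = expected * (1 + 5e-4)  # within 0.05% of expected, so the ratio is ~1

ratio = expected / actual
assert torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)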
345
1
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
345
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
345
1
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})

    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
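As a usage sketch: the column mapping simply renames the configured text column to the canonical "text" key. The import path below is an assumption (older datasets releases exposed this class as datasets.tasks.LanguageModeling; it may differ by version):

# Hypothetical usage; the import path is an assumption, not confirmed by the source above.
from datasets.tasks import LanguageModeling

task = LanguageModeling(text_column="content")
assert task.column_mapping == {"content": "text"}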
345
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
345
1
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polymonial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
345
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
345
1
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
345
def topological_sort(graph):
    # Kahn's algorithm: repeatedly remove vertices with indegree 0
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
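Because the function reports a cycle whenever fewer than len(graph) vertices reach indegree zero, it doubles as a cycle detector. A small usage sketch (the cyclic graph below is made up for illustration) relying only on the function above:

# A graph with a cycle 1 -> 2 -> 3 -> 1: after vertex 0 is removed, no remaining
# vertex ever reaches indegree 0, so cnt < len(graph) and "Cycle exists" is printed.
cyclic_graph = {0: [1], 1: [2], 2: [3], 3: [1]}
topological_sort(cyclic_graph)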
345
1
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x6745_2301
    b0 = 0xEFCD_AB89
    c0 = 0x98BA_DCFE
    d0 = 0x1032_5476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
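A quick way to sanity-check the implementation above is to compare its digests against the standard library's MD5; this sketch assumes only hashlib and the md5_me function defined above:

import hashlib

# md5_me returns the hex digest as bytes, so encode hashlib's str output for comparison.
for message in [b"", b"The quick brown fox jumps over the lazy dog"]:
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")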
345
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
345
1
import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    # special case for ForPreTraining model: it also needs a next_sentence_label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)

    def
lowerCamelCase_ ( _a : Union[str, Any] ): '''simple docstring''' return torch.tensor( _a , dtype=torch.long , device=_a , ) UpperCamelCase_ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class _snake_case ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip("""Model is not available.""" ) def A__ ( self: Dict ) -> Any: UpperCAmelCase_ : Any = """nvidia/megatron-bert-uncased-345m""" if "MYDIR" in os.environ: UpperCAmelCase_ : Union[str, Any] = os.path.join(os.environ["""MYDIR"""] ,lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = MegatronBertModel.from_pretrained(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.half() UpperCAmelCase_ : Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): UpperCAmelCase_ : int = model(lowerCamelCase_ )[0] UpperCAmelCase_ : str = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape ,lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8] for ii in range(3 ): for jj in range(3 ): UpperCAmelCase_ : Optional[Any] = output[0, ii, jj] UpperCAmelCase_ : List[str] = expected[3 * ii + jj] UpperCAmelCase_ : Optional[int] = """ii={} jj={} a={} b={}""".format(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) self.assertTrue(math.isclose(lowerCamelCase_ ,lowerCamelCase_ ,rel_tol=lowerCamelCase_ ,abs_tol=lowerCamelCase_ ) ,msg=lowerCamelCase_ )
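# A minimal standalone inference sketch mirroring the slow integration test above.
# Hedged: it assumes the "nvidia/megatron-bert-uncased-345m" checkpoint is actually
# retrievable, which the skipped test itself does not guarantee.
import torch
from transformers import MegatronBertModel

model = MegatronBertModel.from_pretrained("nvidia/megatron-bert-uncased-345m")
model.eval()
input_ids = torch.tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
with torch.no_grad():
    hidden = model(input_ids).last_hidden_state  # expected shape: (1, 9, 1024)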
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: int ) -> str: # A mock response for an HTTP head request to emulate server down UpperCAmelCase_ : List[str] = mock.Mock() UpperCAmelCase_ : List[Any] = 500 UpperCAmelCase_ : Union[str, Any] = {} UpperCAmelCase_ : Union[str, Any] = HTTPError UpperCAmelCase_ : Any = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head: UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def A__ ( self: str ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase_ : str = mock.Mock() UpperCAmelCase_ : Optional[int] = 500 UpperCAmelCase_ : int = {} UpperCAmelCase_ : Union[str, Any] = HTTPError UpperCAmelCase_ : List[Any] = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head: UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def A__ ( self: str ) -> Dict: # This test is for deprecated behavior and can be removed in v5 try: UpperCAmelCase_ : Any = tempfile.mktemp() with open(lowerCamelCase_ ,"""wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ ) UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ ) finally: os.remove(lowerCamelCase_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. 
return try: with open("""tokenizer.json""" ,"""wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ ) UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("""tokenizer.json""" ) def A__ ( self: List[str] ) -> Tuple: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class _snake_case ( unittest.TestCase ): '''simple docstring''' A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def A__ ( cls: Dict ) -> Optional[int]: UpperCAmelCase_ : List[str] = TOKEN HfFolder.save_token(lowerCamelCase_ ) @classmethod def A__ ( cls: Optional[Any] ) -> List[str]: try: delete_repo(token=cls._token ,repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def A__ ( self: Any ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ ) tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def A__ ( self: Optional[int] ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token ) UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) 
@require_tokenizers def A__ ( self: Optional[int] ) -> Optional[Any]: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ ) bert_tokenizer.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" ) class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Any = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def A__ ( self: Tuple ) -> Optional[int]: UpperCAmelCase_ : str = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Dict = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] ) def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[str] = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] ) def A__ ( self: int ) -> 
Union[str, Any]: UpperCAmelCase_ : List[str] = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] ) def A__ ( self: int ) -> List[str]: UpperCAmelCase_ : int = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] ) def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] ) def A__ ( self: List[Any] ) -> Any: # Even if the offsets are wrong, we necessarily output correct string # parts. UpperCAmelCase_ : Tuple = Trie() UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
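# A short standalone usage sketch of the same Trie (its behaviour follows directly from
# the assertions above): special tokens are registered once, and ``split`` then segments
# raw text around the longest registered match.
from transformers.tokenization_utils import Trie

special_trie = Trie()
special_trie.add("[CLS]")
special_trie.add("extra_id_100")
assert special_trie.split("[CLS] This is a extra_id_100") == ["[CLS]", " This is a ", "extra_id_100"]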
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Any ) -> Union[str, Any]: if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() ,encoding="""utf-8""" ,check=lowerCamelCase_ ,) assert hasattr(self ,"""env""" ) def A__ ( self: Optional[int] ,lowerCamelCase_: str=1 ) -> Dict: # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F'''{self.env.base_job_name}-single''' ,instance_count=lowerCamelCase_ ,instance_type=self.instance_type ,debugger_hook_config=lowerCamelCase_ ,hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,py_version="""py36""" ,) def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[Any]: TrainingJobAnalytics(lowerCamelCase_ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) def A__ ( self: Union[str, Any] ) -> Dict: # create estimator UpperCAmelCase_ : int = self.create_estimator() # run training estimator.fit() # result dataframe UpperCAmelCase_ : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase_ : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) UpperCAmelCase_ : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase_ : Optional[int] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' ,"""w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,lowerCamelCase_ )
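# A minimal, self-contained sketch of the ``parameterized_class`` mechanism used above
# (hypothetical toy test): each dict in the list becomes one generated subclass whose
# keys are injected as class attributes, which is how ``self.framework`` / ``self.script``
# resolve inside the real test methods.
import unittest
from parameterized import parameterized_class

@parameterized_class([{"framework": "pytorch"}, {"framework": "tensorflow"}])
class ExampleMatrixTest(unittest.TestCase):
    def test_attribute_is_injected(self):
        self.assertIn(self.framework, ("pytorch", "tensorflow"))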
from ..utils import DummyObject, requires_backends class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Any = ["flax"] def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Dict = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : List[str] = ["flax"] def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : int = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[int] = ["flax"] def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : List[Any] = ["flax"] def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict: requires_backends(cls ,["""flax"""] ) 
@classmethod def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : str = ["flax"] def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Union[str, Any] = ["flax"] def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[Any] = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[int] = ["flax"] def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]: requires_backends(cls ,["""flax"""] )
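# A simplified sketch of the mechanism behind these placeholders (an assumption about
# the spirit of ``DummyObject``/``requires_backends``, not their exact implementation):
# constructing a dummy raises an informative ImportError when the backend is missing,
# instead of failing with a NameError at import time.
import importlib.util

def _requires_backends_sketch(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the following backend(s): {', '.join(missing)}")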
from __future__ import annotations from math import gcd def lowerCamelCase_ ( _a : int , _a : int = 2 , _a : int = 1 , _a : int = 3 , ): '''simple docstring''' if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_a : int , _a : int , _a : int ) -> int: return (pow(_a , 2 ) + step) % modulus for _ in range(_a ): # These track the position within the cycle detection logic. UpperCAmelCase_ : Optional[Any] = seed UpperCAmelCase_ : int = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. UpperCAmelCase_ : Tuple = rand_fn(_a , _a , _a ) UpperCAmelCase_ : str = rand_fn(_a , _a , _a ) UpperCAmelCase_ : Tuple = rand_fn(_a , _a , _a ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. UpperCAmelCase_ : Optional[Any] = gcd(hare - tortoise , _a ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. UpperCAmelCase_ : Union[str, Any] = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''num''', type=int, help='''The value to find a divisor of''', ) parser.add_argument( '''--attempts''', type=int, default=3, help='''The number of attempts before giving up''', ) UpperCamelCase_ = parser.parse_args() UpperCamelCase_ = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"{args.num} is probably prime") else: UpperCamelCase_ = args.num // divisor print(F"{args.num} = {divisor} * {quotient}")
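# A quick programmatic sanity check (a sketch; ``pollard_rho`` is the name the CLI block
# above calls, and 8051 = 83 * 97 is the usual textbook composite). Which of the two
# factors comes back depends on the seed/step internals, so only divisibility is asserted.
def _pollard_rho_smoke_test() -> None:
    divisor = pollard_rho(8051)
    assert divisor is not None
    assert divisor not in (1, 8051) and 8051 % divisor == 0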
import random


def fisher_yates_shuffle(data: list) -> list:
    """Shuffle ``data`` in place by repeatedly swapping two random positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
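# Note: the loop above swaps two independently chosen random positions on each pass,
# which is a naive shuffle rather than the textbook Fisher-Yates and is known not to
# produce a uniform permutation. A minimal sketch of the classic algorithm for comparison:
def fisher_yates_shuffle_textbook(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # inclusive: j may equal i, so the element can stay put
        data[i], data[j] = data[j], data[i]
    return data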
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def lowerCamelCase_ ( _a : int ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_a , _a ) def lowerCamelCase_ ( _a : Optional[int] ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Dict = emb.weight.shape UpperCAmelCase_ : List[Any] = nn.Linear(_a , _a , bias=_a ) UpperCAmelCase_ : Optional[int] = emb.weight.data return lin_layer def lowerCamelCase_ ( _a : Union[str, Any] , _a : Dict=None ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = {} for old_key in state_dict.keys(): UpperCAmelCase_ : Union[str, Any] = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase_ : Any = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' ) else: UpperCAmelCase_ : List[str] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: UpperCAmelCase_ : List[str] = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: UpperCAmelCase_ : List[Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: UpperCAmelCase_ : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: UpperCAmelCase_ : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: UpperCAmelCase_ : List[str] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: UpperCAmelCase_ : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) UpperCAmelCase_ : Dict = state_dict[old_key] return new_dict def lowerCamelCase_ ( _a : Any , _a : str , _a : Dict , _a : Any , _a : str = WEIGHTS_NAME ): '''simple docstring''' UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : Optional[Any] = 0 os.makedirs(_a , exist_ok=_a ) for expert in range(_a ): UpperCAmelCase_ : List[Any] = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_a ): UpperCAmelCase_ : Union[str, Any] = torch.load(_a )["""model"""] remove_ignore_keys_(_a ) UpperCAmelCase_ : Any = rename_fairseq_keys(_a , _a ) UpperCAmelCase_ : int = os.path.join( _a , weights_name.replace(""".bin""" , F'''-{len(_a )+1:05d}-of-???.bin''' ) ) torch.save(_a , _a ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_a )[0]].dtype ) # Add the last block UpperCAmelCase_ : Any = os.path.join(_a , weights_name.replace(""".bin""" , F'''-{len(_a )+1:05d}-of-???.bin''' ) ) UpperCAmelCase_ : Any = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_a ) UpperCAmelCase_ : str = rename_fairseq_keys(_a , _a ) UpperCAmelCase_ : Optional[int] = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(_a ) == 1: UpperCAmelCase_ : Optional[int] = 
os.path.join(_a , _a ) torch.save(_a , _a ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_a , _a ) # Otherwise, let's build the index UpperCAmelCase_ : Union[str, Any] = {} for idx, shard in enumerate(_a ): UpperCAmelCase_ : int = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_a ):05d}.bin''' ) UpperCAmelCase_ : Optional[int] = os.path.join(_a , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_a , os.path.join(_a , _a ) ) for key in shard: UpperCAmelCase_ : Dict = shard_file # Add the metadata UpperCAmelCase_ : Optional[int] = {"""total_size""": total_size} UpperCAmelCase_ : List[str] = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_a , _a ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase_ : Any = json.dumps(_a , indent=2 , sort_keys=_a ) + """\n""" f.write(_a ) return metadata, index if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) UpperCamelCase_ = parser.parse_args() UpperCamelCase_ ,UpperCamelCase_ = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) UpperCamelCase_ = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) UpperCamelCase_ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
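# For reference, a sketch of the ``*.bin.index.json`` this script emits (field names
# follow the standard sharded-checkpoint format written above; the concrete shard file
# names and sizes are illustrative only):
#
# {
#   "metadata": {"total_size": 123456789},
#   "weight_map": {
#     "decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
#     "encoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin"
#   }
# }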
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = True A__ : bool = False A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> List[str]: UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[int] = [] for i in range(self.num_layers ): UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD( in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : int = resnets UpperCAmelCase_ : Tuple = attentions if self.add_downsample: UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int: UpperCAmelCase_ : List[Any] = () for resnet, attn in zip(self.resnets ,self.attentions ): UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : bool = True A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> int: UpperCAmelCase_ : List[str] = [] for i in range(self.num_layers ): UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels UpperCAmelCase_ : Dict = FlaxResnetBlockaD( in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = resnets if self.add_downsample: UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any: UpperCAmelCase_ : Union[str, Any] = () for resnet in self.resnets: UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = True A__ : bool = False A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: str ) -> Any: UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : List[str] = [] for i in 
range(self.num_layers ): UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : int = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = resnets UpperCAmelCase_ : Dict = attentions if self.add_upsample: UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]: for resnet, attn in zip(self.resnets ,self.attentions ): # pop res hidden states UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1] UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1] UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) if self.add_upsample: UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ ) return hidden_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : bool = True A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> Dict: UpperCAmelCase_ : Any = [] for i in range(self.num_layers ): UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase_ : Any = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : str = resnets if self.add_upsample: UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]: for resnet in self.resnets: # pop res hidden states UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1] UpperCAmelCase_ : str = res_hidden_states_tuple[:-1] UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) if self.add_upsample: UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ ) return hidden_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> List[str]: # there is always at least one resnet UpperCAmelCase_ : List[Any] = [ FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels 
,dropout_prob=self.dropout ,dtype=self.dtype ,) ] UpperCAmelCase_ : Any = [] for _ in range(self.num_layers ): UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel( in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : Any = FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Dict = resnets UpperCAmelCase_ : Any = attentions def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ ) for attn, resnet in zip(self.attentions ,self.resnets[1:] ): UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) return hidden_states
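# A minimal, self-contained sketch of the flax.linen lifecycle these blocks rely on
# (a hypothetical toy module; the real blocks above additionally take timestep embeddings
# and, for the cross-attention variants, encoder hidden states):
import jax
import jax.numpy as jnp
import flax.linen as nn

class ToyBlock(nn.Module):
    features: int

    @nn.compact
    def __call__(self, x):
        return nn.Dense(self.features)(x)

toy = ToyBlock(features=4)
variables = toy.init(jax.random.PRNGKey(0), jnp.ones((1, 8)))  # build the parameter tree
y = toy.apply(variables, jnp.ones((1, 8)))                     # run the forward pass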
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number: positive integers whose only prime factors are 2, 3 and 5."""
    ugly_nums = [1]
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
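# A brute-force cross-check sketch for small n (the first few ugly numbers are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12):
def _is_ugly(n: int) -> bool:
    for p in (2, 3, 5):
        while n % p == 0:
            n //= p
    return n == 1

def _nth_ugly_bruteforce(n: int) -> int:
    count, candidate = 0, 0
    while count < n:
        candidate += 1
        if _is_ugly(candidate):
            count += 1
    return candidate

assert all(ugly_numbers(k) == _nth_ugly_bruteforce(k) for k in range(1, 20))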
import pickle import numpy as np from matplotlib import pyplot as plt class _snake_case : '''simple docstring''' def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]: UpperCAmelCase_ : List[Any] = bp_numa UpperCAmelCase_ : str = bp_numa UpperCAmelCase_ : List[Any] = bp_numa UpperCAmelCase_ : Optional[int] = conva_get[:2] UpperCAmelCase_ : List[Any] = conva_get[2] UpperCAmelCase_ : str = size_pa UpperCAmelCase_ : Optional[int] = rate_w UpperCAmelCase_ : Dict = rate_t UpperCAmelCase_ : List[Any] = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1 UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1 UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple: # save model dict with pickle UpperCAmelCase_ : Dict = { """num_bp1""": self.num_bpa, """num_bp2""": self.num_bpa, """num_bp3""": self.num_bpa, """conv1""": self.conva, """step_conv1""": self.step_conva, """size_pooling1""": self.size_poolinga, """rate_weight""": self.rate_weight, """rate_thre""": self.rate_thre, """w_conv1""": self.w_conva, """wkj""": self.wkj, """vji""": self.vji, """thre_conv1""": self.thre_conva, """thre_bp2""": self.thre_bpa, """thre_bp3""": self.thre_bpa, } with open(lowerCamelCase_ ,"""wb""" ) as f: pickle.dump(lowerCamelCase_ ,lowerCamelCase_ ) print(F'''Model saved: {save_path}''' ) @classmethod def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]: # read saved model with open(lowerCamelCase_ ,"""rb""" ) as f: UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301 UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" ) conv_get.append(model_dic.get("""step_conv1""" ) ) UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" ) UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" ) UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" ) UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" ) UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" ) UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" ) # create model instance UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # modify model parameter UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" ) UpperCAmelCase_ : int = model_dic.get("""wkj""" ) UpperCAmelCase_ : int = model_dic.get("""vji""" ) UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" ) UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" ) UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" ) return conv_ins def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple: return 1 / (1 + np.exp(-1 * x )) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]: return round(lowerCamelCase_ ,3 ) def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any: # convolution process UpperCAmelCase_ : Optional[Any] = convs[0] UpperCAmelCase_ : int = 
convs[1] UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0] # get the data slice of original image data, data_focus UpperCAmelCase_ : Dict = [] for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ): for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ): UpperCAmelCase_ : Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(lowerCamelCase_ ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(lowerCamelCase_ ): UpperCAmelCase_ : Optional[int] = [] for i_focus in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : int = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(lowerCamelCase_ ) ) UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape( lowerCamelCase_ ,lowerCamelCase_ ) data_featuremap.append(lowerCamelCase_ ) # expanding the data slice to One dimenssion UpperCAmelCase_ : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) ) UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ ) return focus_list, data_featuremap def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]: # pooling process UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] ) UpperCAmelCase_ : Any = int(size_map / size_pooling ) UpperCAmelCase_ : Optional[int] = [] for i_map in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : Any = featuremaps[i_map] UpperCAmelCase_ : Tuple = [] for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ): for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(lowerCamelCase_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(lowerCamelCase_ ) ) UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ ) featuremap_pooled.append(lowerCamelCase_ ) return featuremap_pooled def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]: # expanding three dimension data to one dimension list UpperCAmelCase_ : List[Any] = [] for i in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : Tuple = np.shape(data[i] ) UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] ) UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0] data_expanded.extend(lowerCamelCase_ ) UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ ) return data_expanded def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]: # expanding matrix to one dimension list UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ ) UpperCAmelCase_ : str = np.shape(lowerCamelCase_ ) UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]: UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Tuple = 0 for i_map in range(lowerCamelCase_ ): UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) ) for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ): for j in range(0 
,lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : Any = pd_pool[ i_pool ] UpperCAmelCase_ : List[str] = i_pool + 1 UpperCAmelCase_ : Optional[Any] = np.multiply( lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(lowerCamelCase_ ) return pd_all def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]: # model traning print("""----------------------Start Training-------------------------""" ) print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) ) print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) ) UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : Any = 10000 while rp < n_repeat and mse >= error_accuracy: UpperCAmelCase_ : List[str] = 0 print(F'''-------------Learning Time {rp}--------------''' ) for p in range(len(lowerCamelCase_ ) ): # print('------------Learning Image: %d--------------'%p) UpperCAmelCase_ : str = np.asmatrix(datas_train[p] ) UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute( lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga ) UpperCAmelCase_ : int = np.shape(lowerCamelCase_ ) UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = data_bp_input UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa UpperCAmelCase_ : int = self.sig(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCAmelCase_ : List[str] = np.multiply( (data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) ) UpperCAmelCase_ : List[Any] = np.multiply( np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) ) UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji ) UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist() UpperCAmelCase_ : str = self._calculate_gradient_from_pool( lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] ) UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCAmelCase_ : str = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCAmelCase_ : int = rp + 1 UpperCAmelCase_ : Any = 
error_count / patterns all_mse.append(lowerCamelCase_ ) def draw_error(): UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(lowerCamelCase_ ,"""+-""" ) plt.plot(lowerCamelCase_ ,"""r--""" ) plt.xlabel("""Learning Times""" ) plt.ylabel("""All_mse""" ) plt.grid(lowerCamelCase_ ,alpha=0.5 ) plt.show() print("""------------------Training Complished---------------------""" ) print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple: # model predict UpperCAmelCase_ : Union[str, Any] = [] print("""-------------------Start Testing-------------------------""" ) print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) ) for p in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : int = np.asmatrix(datas_test[p] ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute( lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga ) UpperCAmelCase_ : str = self._expand(lowerCamelCase_ ) UpperCAmelCase_ : str = data_bp_input UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ ) produce_out.extend(bp_outa.getA().tolist() ) UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out] return np.asarray(lowerCamelCase_ ) def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple: # return the data of image after convoluting process so we can check it out UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute( lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
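# A de-obfuscated sketch of the inner convolution step the class above performs
# (assumption: this mirrors its "sum of window * kernel, minus threshold, through a
# sigmoid" logic for a single kernel; it is illustrative, not a drop-in replacement):
import numpy as np

def _sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def _convolve_one_kernel(image, kernel, threshold, stride=1):
    size = kernel.shape[0]
    out_dim = (image.shape[0] - size) // stride + 1
    out = np.zeros((out_dim, out_dim))
    for i in range(out_dim):
        for j in range(out_dim):
            window = image[i * stride : i * stride + size, j * stride : j * stride + size]
            out[i, j] = _sigmoid(np.sum(window * kernel) - threshold)
    return out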
345
1
import math def lowerCamelCase_ ( _a : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(_a ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCamelCase_ ( _a : int = 1_0001 ): '''simple docstring''' try: UpperCAmelCase_ : Dict = int(_a ) except (TypeError, ValueError): raise TypeError("""Parameter nth must be int or castable to int.""" ) from None if nth <= 0: raise ValueError("""Parameter nth must be greater than or equal to one.""" ) UpperCAmelCase_ : list[int] = [] UpperCAmelCase_ : Union[str, Any] = 2 while len(_a ) < nth: if is_prime(_a ): primes.append(_a ) num += 1 else: num += 1 return primes[len(_a ) - 1] if __name__ == "__main__": print(F"{solution() = }")
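# A quick check of the 6k +/- 1 observation the trial division above relies
# on: every prime greater than 3 leaves remainder 1 or 5 modulo 6, so once
# 2 and 3 are handled separately, only candidates of the form 6k - 1 and
# 6k + 1 need testing up to the square root.
for p in (5, 7, 11, 13, 101, 10_007):
    assert p % 6 in (1, 5)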
345
import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _snake_case ( __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Optional[Any] = CTRLTokenizer A__ : Optional[Any] = False A__ : str = False def A__ ( self: Optional[int] ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""] UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) ) UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""] UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""} UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowerCamelCase_ ) ) def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str: kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ ) def A__ ( self: int ,lowerCamelCase_: int ) -> str: UpperCAmelCase_ : List[str] = """adapt react readapt apt""" UpperCAmelCase_ : List[Any] = """adapt react readapt apt""" return input_text, output_text def A__ ( self: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) UpperCAmelCase_ : List[Any] = """adapt react readapt apt""" UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split() UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token] UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
345
1
import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_ ( _a : Union[str, Any] , _a : Any , _a : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Dict = LxmertConfig.from_json_file(_a ) print(F'''Building PyTorch model from configuration: {config}''' ) UpperCAmelCase_ : List[Any] = LxmertForPreTraining(_a ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(_a , _a , _a ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _a ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
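# A hypothetical invocation of the conversion script above; the script
# filename and all three paths are placeholders and must point at a real
# TensorFlow checkpoint, its config JSON, and the desired output file.
#
#   python convert_lxmert_checkpoint.py \
#     --tf_checkpoint_path ./lxmert/model.ckpt \
#     --config_file ./lxmert/config.json \
#     --pytorch_dump_path ./lxmert/pytorch_model.bin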
345
from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig UpperCamelCase_ = { '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''', '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''', } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Union[str, Any] = "ernie_m" A__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self: str ,lowerCamelCase_: int = 250002 ,lowerCamelCase_: int = 768 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 12 ,lowerCamelCase_: int = 3072 ,lowerCamelCase_: str = "gelu" ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: float = 0.1 ,lowerCamelCase_: int = 514 ,lowerCamelCase_: float = 0.0_2 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 1e-05 ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Tuple=0.0 ,**lowerCamelCase_: Optional[int] ,) -> Optional[Any]: super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Optional[Any] = num_hidden_layers UpperCAmelCase_ : Union[str, Any] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : List[Any] = hidden_act UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob UpperCAmelCase_ : str = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Union[str, Any] = layer_norm_eps UpperCAmelCase_ : List[Any] = classifier_dropout UpperCAmelCase_ : str = is_decoder UpperCAmelCase_ : List[str] = act_dropout
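# The attribute map above aliases "dropout" to "classifier_dropout" and
# "num_classes" to "num_labels". A sketch of that aliasing, assuming the
# masked class corresponds to transformers' ErnieMConfig:
from transformers import ErnieMConfig

cfg = ErnieMConfig(classifier_dropout=0.1)
print(cfg.dropout)      # 0.1 -- resolved through the attribute map
print(cfg.num_classes)  # 2   -- alias for the num_labels default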
345
1
from itertools import product def lowerCamelCase_ ( _a : int , _a : int ): '''simple docstring''' UpperCAmelCase_ : List[Any] = sides_number UpperCAmelCase_ : int = max_face_number * dice_number UpperCAmelCase_ : str = [0] * (max_total + 1) UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : Dict = range(_a , max_face_number + 1 ) for dice_numbers in product(_a , repeat=_a ): UpperCAmelCase_ : str = sum(_a ) totals_frequencies[total] += 1 return totals_frequencies def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : Any = total_frequency_distribution( sides_number=4 , dice_number=9 ) UpperCAmelCase_ : List[Any] = total_frequency_distribution( sides_number=6 , dice_number=6 ) UpperCAmelCase_ : Union[str, Any] = 0 UpperCAmelCase_ : str = 9 UpperCAmelCase_ : List[str] = 4 * 9 UpperCAmelCase_ : List[str] = 6 for peter_total in range(_a , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) UpperCAmelCase_ : Union[str, Any] = (4**9) * (6**6) UpperCAmelCase_ : Optional[Any] = peter_wins_count / total_games_number UpperCAmelCase_ : Optional[int] = round(_a , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"{solution() = }")
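# A sanity check of the frequency-distribution idea above, on the familiar
# case of two six-sided dice: there are 36 equally likely ordered rolls,
# and the total 7 occurs 6 times.
from itertools import product

freq = [0] * (2 * 6 + 1)
for roll in product(range(1, 7), repeat=2):
    freq[sum(roll)] += 1
assert sum(freq) == 36
assert freq[7] == 6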
345
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : int=" " ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = text.split(_a ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )] def lowerCamelCase_ ( _a : dict ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(_a ): titles.append(title if title is not None else """""" ) texts.append(_a ) return {"title": titles, "text": texts} def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ): '''simple docstring''' UpperCAmelCase_ : List[str] = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""] UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ): '''simple docstring''' logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way UpperCAmelCase_ : Optional[int] = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc ) # And compute the embeddings UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a ) UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) UpperCAmelCase_ : Any = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space UpperCAmelCase_ : List[str] = dataset.map( partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , ) # And finally save your dataset UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" ) dataset.save_to_disk(_a ) # from datasets import load_from_disk # 
dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=_a ) # And save the index UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(_a ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class _snake_case : '''simple docstring''' A__ : str = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) A__ : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) A__ : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) A__ : Optional[str] = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class _snake_case : '''simple docstring''' A__ : Optional[int] = field( default=__snake_case , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) A__ : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class _snake_case : '''simple docstring''' A__ : int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) A__ : int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
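# A minimal sketch of the HNSW indexing step above, using random unit-norm
# vectors in place of DPR embeddings; d and m match the script's defaults,
# but the data is illustrative only.
import faiss
import numpy as np

d, m = 768, 128
xb = np.random.rand(1_000, d).astype("float32")
xb /= np.linalg.norm(xb, axis=1, keepdims=True)    # inner product ~ cosine
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
index.add(xb)
scores, ids = index.search(xb[:1], 5)              # approximate top-5 neighbours
print(ids[0])                                      # typically led by the query itself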
345
1
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_ ( _a : Union[str, Any] , _a : int ): '''simple docstring''' assert isinstance(_a , _a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_ ( _a : Optional[int] , _a : Union[str, Any] , _a : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = tmp_path / """cache""" UpperCAmelCase_ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase_ : List[Any] = JsonDatasetReader(_a , cache_dir=_a , keep_in_memory=_a ).read() _check_json_dataset(_a , _a ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_ ( _a : str , _a : int , _a : Dict ): '''simple docstring''' UpperCAmelCase_ : int = tmp_path / """cache""" UpperCAmelCase_ : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCAmelCase_ : int = features.copy() if features else default_expected_features UpperCAmelCase_ : Dict = ( Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase_ : int = JsonDatasetReader(_a , features=_a , cache_dir=_a ).read() _check_json_dataset(_a , _a ) @pytest.mark.parametrize( """features""" , [ None, {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}, ] , ) def lowerCamelCase_ ( _a : Any , _a : Dict , _a : int ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = tmp_path / """cache""" UpperCAmelCase_ : List[Any] = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""} UpperCAmelCase_ : Tuple = features.copy() if features else default_expected_features UpperCAmelCase_ : Optional[int] = ( Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase_ : List[str] = JsonDatasetReader(_a , features=_a , cache_dir=_a ).read() assert isinstance(_a , _a ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowerCamelCase_ ( _a : Optional[Any] , _a : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : Tuple = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""} UpperCAmelCase_ : Optional[int] = features.copy() UpperCAmelCase_ : Optional[Any] = ( Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase_ : List[str] = tmp_path / """cache""" 
UpperCAmelCase_ : List[str] = JsonDatasetReader(_a , features=_a , cache_dir=_a ).read() assert isinstance(_a , _a ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_ ( _a : Optional[Any] , _a : List[str] , _a : List[str] ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path / """cache""" UpperCAmelCase_ : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCAmelCase_ : Optional[Any] = JsonDatasetReader(_a , cache_dir=_a , split=_a ).read() _check_json_dataset(_a , _a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_ ( _a : Dict , _a : Optional[int] , _a : Optional[Any] ): '''simple docstring''' if issubclass(_a , _a ): UpperCAmelCase_ : List[str] = jsonl_path elif issubclass(_a , _a ): UpperCAmelCase_ : Tuple = [jsonl_path] UpperCAmelCase_ : int = tmp_path / """cache""" UpperCAmelCase_ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCAmelCase_ : Tuple = JsonDatasetReader(_a , cache_dir=_a ).read() _check_json_dataset(_a , _a ) def lowerCamelCase_ ( _a : List[str] , _a : Optional[int] , _a : List[Any]=("train",) ): '''simple docstring''' assert isinstance(_a , _a ) for split in splits: UpperCAmelCase_ : Tuple = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_ ( _a : List[Any] , _a : Optional[int] , _a : str ): '''simple docstring''' UpperCAmelCase_ : List[str] = tmp_path / """cache""" UpperCAmelCase_ : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase_ : int = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=_a , keep_in_memory=_a ).read() _check_json_datasetdict(_a , _a ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_ ( _a : Tuple , _a : Union[str, Any] , _a : Tuple ): '''simple docstring''' UpperCAmelCase_ : Any = tmp_path / """cache""" UpperCAmelCase_ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCAmelCase_ : Any = features.copy() if features else default_expected_features UpperCAmelCase_ : List[Any] = ( Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase_ : Any = JsonDatasetReader({"""train""": jsonl_path} , features=_a , cache_dir=_a ).read() _check_json_datasetdict(_a , _a ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def 
lowerCamelCase_ ( _a : Any , _a : str , _a : str ): '''simple docstring''' if split: UpperCAmelCase_ : Optional[Any] = {split: jsonl_path} else: UpperCAmelCase_ : Tuple = """train""" UpperCAmelCase_ : Union[str, Any] = {"""train""": jsonl_path, """test""": jsonl_path} UpperCAmelCase_ : Tuple = tmp_path / """cache""" UpperCAmelCase_ : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCAmelCase_ : Union[str, Any] = JsonDatasetReader(_a , cache_dir=_a ).read() _check_json_datasetdict(_a , _a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_ ( _a : str ): '''simple docstring''' return json.load(_a ) def lowerCamelCase_ ( _a : Union[str, Any] ): '''simple docstring''' return [json.loads(_a ) for line in buffer] class _snake_case : '''simple docstring''' @pytest.mark.parametrize("""lines, load_json_function""" ,[(True, load_json_lines), (False, load_json)] ) def A__ ( self: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Optional[int] ) -> Any: with io.BytesIO() as buffer: JsonDatasetWriter(lowerCamelCase_ ,lowerCamelCase_ ,lines=lowerCamelCase_ ).write() buffer.seek(0 ) UpperCAmelCase_ : List[str] = load_json_function(lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert isinstance(exported_content[0] ,lowerCamelCase_ ) assert len(lowerCamelCase_ ) == 10 @pytest.mark.parametrize( """orient, container, keys, len_at""" ,[ ("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None), ("""split""", dict, {"""columns""", """data"""}, """data"""), ("""index""", dict, set("""0123456789""" ), None), ("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""), ("""values""", list, None, None), ("""table""", dict, {"""schema""", """data"""}, """data"""), ] ,) def A__ ( self: List[str] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[int] ) -> Tuple: with io.BytesIO() as buffer: JsonDatasetWriter(lowerCamelCase_ ,lowerCamelCase_ ,lines=lowerCamelCase_ ,orient=lowerCamelCase_ ).write() buffer.seek(0 ) UpperCAmelCase_ : Dict = load_json(lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(lowerCamelCase_ ,"""keys""" ) and not hasattr(exported_content[0] ,"""keys""" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(lowerCamelCase_ ) == 10 @pytest.mark.parametrize("""lines, load_json_function""" ,[(True, load_json_lines), (False, load_json)] ) def A__ ( self: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(lowerCamelCase_ ,lowerCamelCase_ ,lines=lowerCamelCase_ ,num_proc=2 ).write() buffer.seek(0 ) UpperCAmelCase_ : List[Any] = load_json_function(lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert isinstance(exported_content[0] ,lowerCamelCase_ ) assert len(lowerCamelCase_ ) == 10 @pytest.mark.parametrize( """orient, container, keys, len_at""" ,[ ("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None), ("""split""", dict, {"""columns""", """data"""}, """data"""), ("""index""", dict, set("""0123456789""" ), None), ("""columns""", dict, {"""tokens""", """labels""", """answers""", 
"""id"""}, """tokens"""), ("""values""", list, None, None), ("""table""", dict, {"""schema""", """data"""}, """data"""), ] ,) def A__ ( self: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ) -> Any: with io.BytesIO() as buffer: JsonDatasetWriter(lowerCamelCase_ ,lowerCamelCase_ ,lines=lowerCamelCase_ ,orient=lowerCamelCase_ ,num_proc=2 ).write() buffer.seek(0 ) UpperCAmelCase_ : str = load_json(lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(lowerCamelCase_ ,"""keys""" ) and not hasattr(exported_content[0] ,"""keys""" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(lowerCamelCase_ ) == 10 def A__ ( self: Tuple ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]: with pytest.raises(lowerCamelCase_ ): with io.BytesIO() as buffer: JsonDatasetWriter(lowerCamelCase_ ,lowerCamelCase_ ,num_proc=0 ) @pytest.mark.parametrize("""compression, extension""" ,[("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] ) def A__ ( self: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int ) -> Any: UpperCAmelCase_ : Any = tmp_path_factory.mktemp("""data""" ) / F'''test.json.{extension}''' UpperCAmelCase_ : Optional[int] = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(lowerCamelCase_ ,lowerCamelCase_ ,compression=lowerCamelCase_ ).write() with fsspec.open(lowerCamelCase_ ,"""rb""" ,compression="""infer""" ) as f: UpperCAmelCase_ : Dict = f.read() with fsspec.open(lowerCamelCase_ ,"""rb""" ,compression="""infer""" ) as f: UpperCAmelCase_ : Any = f.read() assert exported_content == original_content
345
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Dict = AutoencoderKL A__ : Optional[int] = "sample" A__ : Tuple = 1E-2 @property def A__ ( self: List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = 4 UpperCAmelCase_ : str = 3 UpperCAmelCase_ : Any = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ ) return {"sample": image} @property def A__ ( self: List[str] ) -> Tuple: return (3, 32, 32) @property def A__ ( self: Optional[Any] ) -> Any: return (3, 32, 32) def A__ ( self: Any ) -> Tuple: UpperCAmelCase_ : List[Any] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } UpperCAmelCase_ : int = self.dummy_input return init_dict, inputs_dict def A__ ( self: Optional[Any] ) -> int: pass def A__ ( self: str ) -> Any: pass @unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" ) def A__ ( self: Union[str, Any] ) -> Dict: # enable deterministic behavior for gradient checkpointing UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ ) model.to(lowerCamelCase_ ) assert not model.is_gradient_checkpointing and model.training UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowerCamelCase_ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() UpperCAmelCase_ : Dict = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) UpperCAmelCase_ : Dict = dict(model.named_parameters() ) UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) ) def A__ ( self: Optional[Any] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 ) model.to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def A__ ( self: Optional[int] ) -> int: UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ) UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ ) model.eval() if torch_device == "mps": UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) else: UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : str = torch.randn( 1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,) UpperCAmelCase_ : int = image.to(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": UpperCAmelCase_ : Tuple = torch.tensor( [ -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03, ] ) elif torch_device == "cpu": UpperCAmelCase_ : List[str] = torch.tensor( [-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] ) else: UpperCAmelCase_ : List[str] = torch.tensor( [-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] ) self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) ) @slow class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]: return F'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_ ) for s in shape] )}.npy''' def A__ ( self: Union[str, Any] ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]: UpperCAmelCase_ : Tuple = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ ) return image def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any: UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None UpperCAmelCase_ : str = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ : int = AutoencoderKL.from_pretrained( lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=lowerCamelCase_ ,revision=lowerCamelCase_ ,) model.to(lowerCamelCase_ ).eval() return model def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]: if torch_device == "mps": return torch.manual_seed(lowerCamelCase_ ) return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple: UpperCAmelCase_ : List[Any] = self.get_sd_vae_model() UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]], [47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]], # fmt: on ] ) 
@require_torch_gpu def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple: UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict: UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model() UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]], [37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]], # fmt: on ] ) @require_torch_gpu def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.get_sd_vae_model() UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu() UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]], [16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]], # fmt: on ] ) @require_torch_gpu def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 ) @parameterized.expand([(13,), 
(16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" ) def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int: UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" ) def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.get_sd_vae_model() UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]], [47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]], # fmt: on ] ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : Dict = self.get_sd_vae_model() UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ ) UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu() UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2 assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
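# A shape-level encode/decode round trip through an AutoencoderKL built
# from the same dummy configuration the unit tests above use; the weights
# are randomly initialised, so only the tensor shapes are meaningful.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
)
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    z = vae.encode(x).latent_dist.sample()
    recon = vae.decode(z).sample
print(z.shape, recon.shape)   # expect (1, 4, 16, 16) and (1, 3, 32, 32)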
345
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class _snake_case ( __snake_case ): '''simple docstring''' A__ : List[str] = "bridgetower_vision_model" def __init__( self: List[str] ,lowerCamelCase_: Union[str, Any]=768 ,lowerCamelCase_: Optional[Any]=12 ,lowerCamelCase_: Union[str, Any]=3 ,lowerCamelCase_: Tuple=16 ,lowerCamelCase_: Any=288 ,lowerCamelCase_: List[str]=1 ,lowerCamelCase_: int=1e-05 ,lowerCamelCase_: Optional[Any]=False ,lowerCamelCase_: Optional[Any]=True ,lowerCamelCase_: Dict=False ,**lowerCamelCase_: Dict ,) -> Union[str, Any]: super().__init__(**lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : int = num_channels UpperCAmelCase_ : str = patch_size UpperCAmelCase_ : int = image_size UpperCAmelCase_ : Tuple = initializer_factor UpperCAmelCase_ : Tuple = layer_norm_eps UpperCAmelCase_ : List[str] = stop_gradient UpperCAmelCase_ : Any = share_layernorm UpperCAmelCase_ : Optional[Any] = remove_last_layer @classmethod def A__ ( cls: Optional[Any] ,lowerCamelCase_: Union[str, os.PathLike] ,**lowerCamelCase_: Tuple ) -> "PretrainedConfig": UpperCAmelCase_ , UpperCAmelCase_ : List[str] = cls.get_config_dict(lowerCamelCase_ ,**lowerCamelCase_ ) if config_dict.get("""model_type""" ) == "bridgetower": UpperCAmelCase_ : List[str] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCamelCase_ ,**lowerCamelCase_ ) class _snake_case ( __snake_case ): '''simple docstring''' A__ : List[str] = "bridgetower_text_model" def __init__( self: Optional[int] ,lowerCamelCase_: int=50265 ,lowerCamelCase_: List[Any]=768 ,lowerCamelCase_: Tuple=12 ,lowerCamelCase_: Dict=12 ,lowerCamelCase_: int=1 ,lowerCamelCase_: str=3072 ,lowerCamelCase_: List[str]="gelu" ,lowerCamelCase_: Optional[Any]=0.1 ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: Tuple=514 ,lowerCamelCase_: Union[str, Any]=1 ,lowerCamelCase_: Tuple=1e-05 ,lowerCamelCase_: Any=1 ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Tuple="absolute" ,lowerCamelCase_: Tuple=True ,**lowerCamelCase_: Union[str, Any] ,) -> str: super().__init__(**lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = vocab_size UpperCAmelCase_ : Optional[Any] = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : List[str] = initializer_factor UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Any = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : Any = type_vocab_size UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : Tuple = position_embedding_type UpperCAmelCase_ : List[Any] = use_cache UpperCAmelCase_ : int = pad_token_id UpperCAmelCase_ : int = bos_token_id UpperCAmelCase_ : List[Any] = eos_token_id @classmethod def A__ ( cls: Optional[int] ,lowerCamelCase_: Union[str, os.PathLike] ,**lowerCamelCase_: List[str] ) -> "PretrainedConfig": UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = cls.get_config_dict(lowerCamelCase_ ,**lowerCamelCase_ ) if config_dict.get("""model_type""" ) == "bridgetower": UpperCAmelCase_ : Tuple = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCamelCase_ ,**lowerCamelCase_ ) class _snake_case ( __snake_case ): '''simple docstring''' A__ : str = "bridgetower" def __init__( self: Dict ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[Any]="gelu" ,lowerCamelCase_: int=768 ,lowerCamelCase_: List[str]=1 ,lowerCamelCase_: Tuple=1e-05 ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict="add" ,lowerCamelCase_: Optional[int]=12 ,lowerCamelCase_: Optional[int]=6 ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Any=False ,lowerCamelCase_: List[Any]=None ,lowerCamelCase_: List[Any]=None ,**lowerCamelCase_: int ,) -> Optional[Any]: # TODO: remove this once the Hub files are updated. 
UpperCAmelCase_ : Union[str, Any] = kwargs.pop("""text_config_dict""" ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = kwargs.pop("""vision_config_dict""" ,lowerCamelCase_ ) super().__init__(**lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = share_cross_modal_transformer_layers UpperCAmelCase_ : Dict = hidden_act UpperCAmelCase_ : List[Any] = hidden_size UpperCAmelCase_ : Dict = initializer_factor UpperCAmelCase_ : List[Any] = layer_norm_eps UpperCAmelCase_ : int = share_link_tower_layers UpperCAmelCase_ : List[Any] = link_tower_type UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : List[str] = num_hidden_layers UpperCAmelCase_ : str = tie_word_embeddings UpperCAmelCase_ : int = init_layernorm_from_vision_encoder if text_config is None: UpperCAmelCase_ : Tuple = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: UpperCAmelCase_ : List[Any] = {} logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" ) UpperCAmelCase_ : str = BridgeTowerTextConfig(**lowerCamelCase_ ) UpperCAmelCase_ : List[str] = BridgeTowerVisionConfig(**lowerCamelCase_ ) @classmethod def A__ ( cls: List[str] ,lowerCamelCase_: BridgeTowerTextConfig ,lowerCamelCase_: BridgeTowerVisionConfig ,**lowerCamelCase_: Dict ) -> Optional[Any]: return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**lowerCamelCase_ ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ ) UpperCAmelCase_ : str = self.text_config.to_dict() UpperCAmelCase_ : List[Any] = self.vision_config.to_dict() UpperCAmelCase_ : Optional[int] = self.__class__.model_type return output
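# The call sites above reference BridgeTowerTextConfig and
# BridgeTowerVisionConfig, so the masked classes appear to be the
# BridgeTower configuration family from transformers. A sketch of
# composing them, assuming the masked classmethod matches the upstream
# from_text_vision_configs:
from transformers import (
    BridgeTowerConfig,
    BridgeTowerTextConfig,
    BridgeTowerVisionConfig,
)

cfg = BridgeTowerConfig.from_text_vision_configs(
    BridgeTowerTextConfig(), BridgeTowerVisionConfig()
)
print(cfg.to_dict()["text_config"]["hidden_size"])   # 768 by default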
345
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=__snake_case ) class _snake_case ( __snake_case ): '''simple docstring''' A__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} ) A__ : ClassVar[Features] = Features({"audio": Audio()} ) A__ : ClassVar[Features] = Features({"transcription": Value("string" )} ) A__ : str = "audio" A__ : str = "transcription" def A__ ( self: int ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]: if self.audio_column not in features: raise ValueError(F'''Column {self.audio_column} is not present in features.''' ) if not isinstance(features[self.audio_column] ,lowerCamelCase_ ): raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' ) UpperCAmelCase_ : Any = copy.deepcopy(self ) UpperCAmelCase_ : Union[str, Any] = self.input_schema.copy() UpperCAmelCase_ : Any = features[self.audio_column] UpperCAmelCase_ : Union[str, Any] = input_schema return task_template @property def A__ ( self: List[str] ) -> Dict[str, str]: return {self.audio_column: "audio", self.transcription_column: "transcription"}
345
1
from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor UpperCamelCase_ = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def lowerCamelCase_ ( _a : str ): '''simple docstring''' if isinstance(_a , torch.Tensor ): return image elif isinstance(_a , PIL.Image.Image ): UpperCAmelCase_ : List[str] = [image] UpperCAmelCase_ : Union[str, Any] = [trans(img.convert("""RGB""" ) ) for img in image] UpperCAmelCase_ : Any = torch.stack(_a ) return image class _snake_case ( __snake_case ): '''simple docstring''' def __init__( self: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ) -> Tuple: super().__init__() # make sure scheduler can always be converted to DDIM UpperCAmelCase_ : Any = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ) def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ) -> Optional[int]: if strength < 0 or strength > 1: raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' ) def A__ ( self: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ) -> Any: # get the original timestep using init_timestep UpperCAmelCase_ : Tuple = min(int(num_inference_steps * strength ) ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = max(num_inference_steps - init_timestep ,0 ) UpperCAmelCase_ : Tuple = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def A__ ( self: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=None ) -> List[str]: if not isinstance(lowerCamelCase_ ,(torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase_ )}''' ) UpperCAmelCase_ : Any = image.to(device=lowerCamelCase_ ,dtype=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) UpperCAmelCase_ : Tuple = init_latents.shape UpperCAmelCase_ : Any = randn_tensor(lowerCamelCase_ ,generator=lowerCamelCase_ ,device=lowerCamelCase_ ,dtype=lowerCamelCase_ ) # get latents print("""add noise to latents at timestep""" ,lowerCamelCase_ ) UpperCAmelCase_ : Any = self.scheduler.add_noise(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Dict = init_latents return latents @torch.no_grad() def __call__( self: List[str] ,lowerCamelCase_: Union[torch.FloatTensor, PIL.Image.Image] = None ,lowerCamelCase_: float = 0.8 ,lowerCamelCase_: int = 1 ,lowerCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase_: float = 0.0 ,lowerCamelCase_: int = 50 ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Optional[str] = "pil" ,lowerCamelCase_: bool = True ,) -> Union[ImagePipelineOutput, Tuple]: self.check_inputs(lowerCamelCase_ ) # 2. Preprocess image UpperCAmelCase_ : List[str] = preprocess(lowerCamelCase_ ) # 3. 
set timesteps self.scheduler.set_timesteps(lowerCamelCase_ ,device=self.device ) UpperCAmelCase_ , UpperCAmelCase_ : str = self.get_timesteps(lowerCamelCase_ ,lowerCamelCase_ ,self.device ) UpperCAmelCase_ : int = timesteps[:1].repeat(lowerCamelCase_ ) # 4. Prepare latent variables UpperCAmelCase_ : Dict = self.prepare_latents(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,self.unet.dtype ,self.device ,lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = latents # 5. Denoising loop for t in self.progress_bar(lowerCamelCase_ ): # 1. predict noise model_output UpperCAmelCase_ : Tuple = self.unet(lowerCamelCase_ ,lowerCamelCase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 UpperCAmelCase_ : int = self.scheduler.step( lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,eta=lowerCamelCase_ ,use_clipped_model_output=lowerCamelCase_ ,generator=lowerCamelCase_ ,).prev_sample UpperCAmelCase_ : Tuple = (image / 2 + 0.5).clamp(0 ,1 ) UpperCAmelCase_ : Any = image.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": UpperCAmelCase_ : str = self.numpy_to_pil(lowerCamelCase_ ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=lowerCamelCase_ )
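# A minimal sketch of the strength-based timestep truncation performed by
# the pipeline's get_timesteps above; the values are illustrative.
num_inference_steps = 50
strength = 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
# Denoising then runs over timesteps[10:], i.e. the last 40 of the 50
# scheduler steps; the latents are first noised to timesteps[10] before
# entering the loop.
print(t_start, init_timestep)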
345
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = "layoutlmv3" def __init__( self: str ,lowerCamelCase_: Any=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Tuple=512 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: int=1 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: Tuple=64 ,lowerCamelCase_: Tuple=256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=224 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Dict=None ,**lowerCamelCase_: str ,) -> List[Any]: super().__init__( vocab_size=lowerCamelCase_ ,hidden_size=lowerCamelCase_ ,num_hidden_layers=lowerCamelCase_ ,num_attention_heads=lowerCamelCase_ ,intermediate_size=lowerCamelCase_ ,hidden_act=lowerCamelCase_ ,hidden_dropout_prob=lowerCamelCase_ ,attention_probs_dropout_prob=lowerCamelCase_ ,max_position_embeddings=lowerCamelCase_ ,type_vocab_size=lowerCamelCase_ ,initializer_range=lowerCamelCase_ ,layer_norm_eps=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,) UpperCAmelCase_ : List[Any] = max_ad_position_embeddings UpperCAmelCase_ : Optional[int] = coordinate_size UpperCAmelCase_ : Optional[int] = shape_size UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias UpperCAmelCase_ : Optional[int] = rel_pos_bins UpperCAmelCase_ : Union[str, Any] = max_rel_pos UpperCAmelCase_ : Dict = has_spatial_attention_bias UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins UpperCAmelCase_ : Tuple = max_rel_ad_pos UpperCAmelCase_ : Union[str, Any] = text_embed UpperCAmelCase_ : Optional[Any] = visual_embed UpperCAmelCase_ : List[str] = input_size UpperCAmelCase_ : str = num_channels UpperCAmelCase_ : Optional[int] = patch_size UpperCAmelCase_ : Tuple = classifier_dropout class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = version.parse("1.12" ) @property def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", 
{0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def A__ ( self: Any ) -> float: return 1e-5 @property def A__ ( self: int ) -> int: return 12 def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]: setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ : List[str] = compute_effective_axis_dimension( lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ ) UpperCAmelCase_ : int = compute_effective_axis_dimension( lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = dict( processor( lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) ) return inputs
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCamelCase_ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class _snake_case ( unittest.TestCase ): '''simple docstring''' A__ : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A__ : Union[str, Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: A__ : List[str] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: A__ : str = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def A__ ( self: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : List[str] = ZeroShotClassificationPipeline( model=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def A__ ( self: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any: UpperCAmelCase_ : Optional[int] = classifier("""Who are you voting for in 2020?""" ,candidate_labels="""politics""" ) self.assertEqual(lowerCamelCase_ ,{"""sequence""": ANY(lowerCamelCase_ ), """labels""": [ANY(lowerCamelCase_ )], """scores""": [ANY(lowerCamelCase_ )]} ) # No kwarg UpperCAmelCase_ : Dict = classifier("""Who are you voting for in 2020?""" ,["""politics"""] ) self.assertEqual(lowerCamelCase_ ,{"""sequence""": ANY(lowerCamelCase_ ), """labels""": [ANY(lowerCamelCase_ )], """scores""": [ANY(lowerCamelCase_ )]} ) UpperCAmelCase_ : Optional[Any] = classifier("""Who are you voting for in 2020?""" ,candidate_labels=["""politics"""] ) self.assertEqual(lowerCamelCase_ ,{"""sequence""": ANY(lowerCamelCase_ ), """labels""": [ANY(lowerCamelCase_ )], """scores""": [ANY(lowerCamelCase_ )]} ) UpperCAmelCase_ : Union[str, Any] = classifier("""Who are you voting for in 2020?""" ,candidate_labels="""politics, public health""" ) self.assertEqual( lowerCamelCase_ ,{"""sequence""": ANY(lowerCamelCase_ ), """labels""": [ANY(lowerCamelCase_ ), ANY(lowerCamelCase_ )], """scores""": [ANY(lowerCamelCase_ ), ANY(lowerCamelCase_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) ,1.0 ) UpperCAmelCase_ : Dict = classifier("""Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health"""] ) self.assertEqual( lowerCamelCase_ ,{"""sequence""": ANY(lowerCamelCase_ ), """labels""": [ANY(lowerCamelCase_ ), ANY(lowerCamelCase_ )], """scores""": [ANY(lowerCamelCase_ ), ANY(lowerCamelCase_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) ,1.0 ) UpperCAmelCase_ : int = classifier( """Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template="""This text is about {}""" ) self.assertEqual(lowerCamelCase_ ,{"""sequence""": ANY(lowerCamelCase_ ), """labels""": [ANY(lowerCamelCase_ )], """scores""": [ANY(lowerCamelCase_ )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase_ : Any = classifier(["""I am happy"""] ,["""positive""", """negative"""] ) self.assertEqual( 
lowerCamelCase_ ,[ {"""sequence""": ANY(lowerCamelCase_ ), """labels""": [ANY(lowerCamelCase_ ), ANY(lowerCamelCase_ )], """scores""": [ANY(lowerCamelCase_ ), ANY(lowerCamelCase_ )]} for i in range(1 ) ] ,) UpperCAmelCase_ : Optional[int] = classifier(["""I am happy""", """I am sad"""] ,["""positive""", """negative"""] ) self.assertEqual( lowerCamelCase_ ,[ {"""sequence""": ANY(lowerCamelCase_ ), """labels""": [ANY(lowerCamelCase_ ), ANY(lowerCamelCase_ )], """scores""": [ANY(lowerCamelCase_ ), ANY(lowerCamelCase_ )]} for i in range(2 ) ] ,) with self.assertRaises(lowerCamelCase_ ): classifier("""""" ,candidate_labels="""politics""" ) with self.assertRaises(lowerCamelCase_ ): classifier(lowerCamelCase_ ,candidate_labels="""politics""" ) with self.assertRaises(lowerCamelCase_ ): classifier("""Who are you voting for in 2020?""" ,candidate_labels="""""" ) with self.assertRaises(lowerCamelCase_ ): classifier("""Who are you voting for in 2020?""" ,candidate_labels=lowerCamelCase_ ) with self.assertRaises(lowerCamelCase_ ): classifier( """Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template="""Not formatting template""" ,) with self.assertRaises(lowerCamelCase_ ): classifier( """Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template=lowerCamelCase_ ,) self.run_entailment_id(lowerCamelCase_ ) def A__ ( self: str ,lowerCamelCase_: Pipeline ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = zero_shot_classifier.model.config UpperCAmelCase_ : List[str] = config.labelaid UpperCAmelCase_ : Union[str, Any] = zero_shot_classifier.entailment_id UpperCAmelCase_ : str = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id ,-1 ) UpperCAmelCase_ : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id ,0 ) UpperCAmelCase_ : Tuple = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id ,0 ) UpperCAmelCase_ : str = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id ,2 ) UpperCAmelCase_ : Optional[Any] = original_labelaid self.assertEqual(lowerCamelCase_ ,zero_shot_classifier.entailment_id ) @require_torch def A__ ( self: Any ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = pipeline( """zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""pt""" ,) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. 
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100 ,candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ : Optional[int] = pipeline( """zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""pt""" ,) UpperCAmelCase_ : Optional[Any] = zero_shot_classifier( """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(lowerCamelCase_ ) ,{ """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } ,) @require_tf def A__ ( self: str ) -> Dict: UpperCAmelCase_ : Optional[int] = pipeline( """zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""tf""" ,) UpperCAmelCase_ : Dict = zero_shot_classifier( """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(lowerCamelCase_ ) ,{ """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } ,) @slow @require_torch def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ : Optional[Any] = pipeline("""zero-shot-classification""" ,model="""roberta-large-mnli""" ,framework="""pt""" ) UpperCAmelCase_ : List[str] = zero_shot_classifier( """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(lowerCamelCase_ ) ,{ """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } ,) UpperCAmelCase_ : Any = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. 
We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=lowerCamelCase_ ,) self.assertEqual( nested_simplify(lowerCamelCase_ ) ,{ """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } ,) @slow @require_tf def A__ ( self: int ) -> List[str]: UpperCAmelCase_ : List[Any] = pipeline("""zero-shot-classification""" ,model="""roberta-large-mnli""" ,framework="""tf""" ) UpperCAmelCase_ : Union[str, Any] = zero_shot_classifier( """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(lowerCamelCase_ ) ,{ """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } ,) UpperCAmelCase_ : Optional[int] = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. 
We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=lowerCamelCase_ ,) self.assertEqual( nested_simplify(lowerCamelCase_ ) ,{ """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } ,)
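# A short, hedged sketch of the behaviour the tests above assert, outside the
# test harness. It assumes the tiny test checkpoint is downloadable; it is an
# (effectively untrained) test model, so only the output structure is
# meaningful, not the scores themselves.
from transformers import pipeline

zero_shot = pipeline(
    "zero-shot-classification",
    model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
)
result = zero_shot(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
# result has keys "sequence", "labels", "scores"; with the default
# multi_label=False the scores are a softmax over the labels and sum to ~1.0
# (the tests above check exactly this, expecting roughly [0.333, 0.333, 0.333]).
print(result["labels"], result["scores"])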
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
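# A hedged usage note: how the converter above is typically invoked. The
# script filename and both paths are placeholders, not taken from this file;
# the two positional arguments come from the argparse definition above.
#
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt /path/to/output_dir
#
# The first argument is the fairseq `model.pt`; the second is the folder that
# will receive the Hugging Face `config.json` and model weights.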
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Return the day of the week for a mm-dd-yyyy (or mm/dd/yyyy) date string."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    return f"Your date {date_input}, is a {days[str(f)]}!"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument("date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)")
    args = parser.parse_args()
    print(zeller(args.date_input))
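# A worked example of the arithmetic above for 01-31-2010; every value follows
# directly from the formulas in zeller, nothing here is assumed:
#   m = 1 <= 2, so y -> 2009 and m -> 13; d = 31
#   c = 20, k = 9
#   t = int(2.6 * 13 - 5.39) = int(28.41) = 28
#   u = int(20 / 4) = 5;  v = int(9 / 4) = 2;  x = 31 + 9 = 40
#   z = 28 + 5 + 2 + 40 = 75;  w = 75 - 2 * 20 = 35;  f = 35 % 7 = 0 -> "Sunday"
# and indeed January 31, 2010 fell on a Sunday:
#   zeller("01-31-2010")  ->  "Your date 01-31-2010, is a Sunday!"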
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : '''simple docstring''' def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str: UpperCAmelCase_ : List[Any] = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : str = patch_size UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : Dict = embed_dim UpperCAmelCase_ : Dict = hidden_sizes UpperCAmelCase_ : str = depths UpperCAmelCase_ : int = num_heads UpperCAmelCase_ : List[Any] = window_size UpperCAmelCase_ : Union[str, Any] = mlp_ratio UpperCAmelCase_ : int = qkv_bias UpperCAmelCase_ : List[str] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Optional[int] = drop_path_rate UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : List[Any] = use_absolute_embeddings UpperCAmelCase_ : List[Any] = patch_norm UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Optional[Any] = scope UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Optional[int] = encoder_stride UpperCAmelCase_ : Optional[int] = out_features UpperCAmelCase_ : Optional[int] = out_indices def A__ ( self: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : int = None if self.use_labels: UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Any = self.get_config() return config, pixel_values, labels def A__ ( self: List[Any] ) -> Tuple: return FocalNetConfig( image_size=self.image_size ,patch_size=self.patch_size 
,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]: UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ) UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]: UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] ) # verify backbone works with out_features=None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : int = 1 UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int: UpperCAmelCase_ : List[Any] = self.type_sequence_label_size UpperCAmelCase_ : int = 
FocalNetForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : List[str] = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def A__ ( self: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs UpperCAmelCase_ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[Any] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) A__ : Union[str, Any] = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) A__ : Optional[Any] = False A__ : Any = False A__ : List[str] = False A__ : Any = False A__ : Any = False def A__ ( self: List[str] ) -> Tuple: UpperCAmelCase_ : Dict = FocalNetModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ ) def A__ ( self: List[str] ) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self: List[str] ) -> Union[str, Any]: return def A__ ( self: str ) -> List[str]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def A__ ( self: Tuple ) -> int: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def A__ ( self: Dict ) -> List[str]: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def A__ ( self: int ) -> int: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def A__ ( self: int ) -> Dict: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def A__ ( self: Optional[Any] ) -> Optional[Any]: pass def A__ ( self: Optional[Any] ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase_ : List[Any] = model.get_output_embeddings() 
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) ) def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : str = model_class(lowerCamelCase_ ) UpperCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Any = [*signature.parameters.keys()] UpperCAmelCase_ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]: UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Any = outputs.hidden_states UpperCAmelCase_ : List[Any] = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # FocalNet has a different seq_length UpperCAmelCase_ : int = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape UpperCAmelCase_ : List[Any] = ( reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def A__ ( self: Any ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : str = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Union[str, Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[str] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % 
patch_size[1]) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) @slow def A__ ( self: Optional[int] ) -> Optional[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def A__ ( self: Optional[int] ) -> str: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def A__ ( self: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = self.default_image_processor UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Dict = model(**lowerCamelCase_ ) # verify the logits UpperCAmelCase_ : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) ) self.assertEqual(outputs.logits.argmax(dim=-1 ).item() ,281 ) @require_torch class _snake_case ( __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else () A__ : int = FocalNetConfig A__ : List[str] = False def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : str = FocalNetModelTester(self )
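# A minimal inference sketch mirroring the integration test above, outside the
# test harness. It assumes network access to the "microsoft/focalnet-tiny"
# checkpoint; the image path is a placeholder rather than the repo's COCO
# test fixture.
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

image = Image.open("cats.png")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes
# The test above expects class index 281, i.e. "tabby, tabby cat".
print(model.config.id2label[logits.argmax(-1).item()])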
import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger('''transformers.models.encodec''') UpperCamelCase_ = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } UpperCamelCase_ = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } UpperCamelCase_ = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', '''encoder.model.12.conv.norm''': 
'''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } UpperCamelCase_ = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } UpperCamelCase_ = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } UpperCamelCase_ = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } UpperCamelCase_ = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } UpperCamelCase_ = [] UpperCamelCase_ = [] def lowerCamelCase_ ( _a : Any , _a : Any , _a : Tuple , _a : List[Any] , _a : str ): '''simple docstring''' for attribute in key.split(""".""" ): UpperCAmelCase_ : Any = getattr(_a , _a ) if weight_type is not None: UpperCAmelCase_ : str = getattr(_a , _a ).shape else: UpperCAmelCase_ : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase_ : List[Any] = value elif weight_type == "weight_g": UpperCAmelCase_ : Tuple = value elif weight_type == "weight_v": UpperCAmelCase_ : str = value elif weight_type == "bias": UpperCAmelCase_ : str = value elif weight_type == "running_mean": UpperCAmelCase_ : Optional[Any] = value elif weight_type == "running_var": UpperCAmelCase_ : str = value elif weight_type == "num_batches_tracked": UpperCAmelCase_ : Optional[Any] = value elif weight_type == "weight_ih_l0": UpperCAmelCase_ : Dict = value elif weight_type == "weight_hh_l0": UpperCAmelCase_ : Union[str, Any] = value elif weight_type == "bias_ih_l0": UpperCAmelCase_ : Any = value elif weight_type == "bias_hh_l0": UpperCAmelCase_ : Optional[int] = value elif weight_type == "weight_ih_l1": UpperCAmelCase_ : str = value elif weight_type == "weight_hh_l1": UpperCAmelCase_ : int = value elif weight_type == "bias_ih_l1": UpperCAmelCase_ : int = value elif weight_type == "bias_hh_l1": UpperCAmelCase_ : Optional[Any] = value else: UpperCAmelCase_ : Dict = value logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def lowerCamelCase_ ( _a : Optional[int] , _a : int ): '''simple docstring''' for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: UpperCAmelCase_ , UpperCAmelCase_ : str = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCamelCase_ ( _a : List[Any] , _a : List[Any] , _a : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ : List[str] = [] if model_name == "encodec_24khz" or "encodec_32khz": UpperCAmelCase_ : Tuple = MAPPING_24K elif model_name == "encodec_48khz": UpperCAmelCase_ : List[str] = MAPPING_48K else: raise ValueError(F'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(_a , _a ): logger.info(F'''{name} was ignored''' ) continue UpperCAmelCase_ : List[str] = False for key, mapped_key in MAPPING.items(): if "*" in key: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = key.split(""".*.""" ) if prefix in name and suffix in name: UpperCAmelCase_ : Optional[Any] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ): continue UpperCAmelCase_ : int = True if "*" in mapped_key: UpperCAmelCase_ : List[str] = name.split(_a )[0].split(""".""" )[-2] UpperCAmelCase_ : int = mapped_key.replace("""*""" , _a ) if "weight_g" in name: UpperCAmelCase_ : List[str] = """weight_g""" elif "weight_v" in name: UpperCAmelCase_ : Tuple = """weight_v""" elif "weight_ih_l0" in name: UpperCAmelCase_ : Optional[Any] = """weight_ih_l0""" elif "weight_hh_l0" in name: UpperCAmelCase_ : List[str] = """weight_hh_l0""" elif "bias_ih_l0" in name: UpperCAmelCase_ : Dict = """bias_ih_l0""" elif "bias_hh_l0" in name: UpperCAmelCase_ : Union[str, Any] = """bias_hh_l0""" elif "weight_ih_l1" in name: UpperCAmelCase_ : Union[str, Any] = """weight_ih_l1""" elif "weight_hh_l1" in name: UpperCAmelCase_ : str = """weight_hh_l1""" elif "bias_ih_l1" in name: UpperCAmelCase_ : Optional[int] = """bias_ih_l1""" elif "bias_hh_l1" in name: UpperCAmelCase_ : Any = """bias_hh_l1""" elif "bias" in name: UpperCAmelCase_ : Union[str, Any] = """bias""" elif "weight" in name: 
UpperCAmelCase_ : List[Any] = """weight""" elif "running_mean" in name: UpperCAmelCase_ : Optional[int] = """running_mean""" elif "running_var" in name: UpperCAmelCase_ : Any = """running_var""" elif "num_batches_tracked" in name: UpperCAmelCase_ : Tuple = """num_batches_tracked""" else: UpperCAmelCase_ : Tuple = None set_recursively(_a , _a , _a , _a , _a ) continue if not is_used: unused_weights.append(_a ) logger.warning(F'''Unused weights: {unused_weights}''' ) @torch.no_grad() def lowerCamelCase_ ( _a : Union[str, Any] , _a : Any , _a : Optional[Any] , _a : int=None , _a : Optional[int]=None , ): '''simple docstring''' if config_path is not None: UpperCAmelCase_ : List[Any] = EncodecConfig.from_pretrained(_a ) else: UpperCAmelCase_ : str = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": UpperCAmelCase_ : Dict = [8, 5, 4, 4] UpperCAmelCase_ : Dict = [2.2] UpperCAmelCase_ : Optional[int] = 64 UpperCAmelCase_ : Tuple = 3_2000 UpperCAmelCase_ : List[str] = 2048 UpperCAmelCase_ : Optional[int] = False UpperCAmelCase_ : Dict = False UpperCAmelCase_ : Tuple = False elif model_name == "encodec_48khz": UpperCAmelCase_ : Optional[Any] = [8, 5, 4, 2] UpperCAmelCase_ : str = [3.0, 6.0, 1_2.0, 2_4.0] UpperCAmelCase_ : int = 4_8000 UpperCAmelCase_ : int = 2 UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : Optional[int] = """time_group_norm""" UpperCAmelCase_ : Dict = True UpperCAmelCase_ : int = 1.0 UpperCAmelCase_ : Any = 0.0_1 else: raise ValueError(F'''Unknown model name: {model_name}''' ) UpperCAmelCase_ : int = EncodecModel(_a ) UpperCAmelCase_ : Tuple = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(_a ) UpperCAmelCase_ : Tuple = torch.load(_a ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights UpperCAmelCase_ : Optional[int] = original_checkpoint["""best_state"""] recursively_load_weights(_a , _a , _a ) model.save_pretrained(_a ) if repo_id: print("""Pushing to the hub...""" ) feature_extractor.push_to_hub(_a ) model.push_to_hub(_a ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) UpperCamelCase_ = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
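# A hedged usage note: a typical invocation of the converter above. The script
# filename and paths are placeholders; the flag names and the default
# "encodec_24khz" model choice come from the argparse definition in this file.
#
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path /path/to/encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# Pass --config_path to start from an existing config.json, and --push_to_hub
# with a repo id to upload the converted model and feature extractor.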
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : '''simple docstring''' def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]: UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : int = image_size UpperCAmelCase_ : Union[str, Any] = patch_size UpperCAmelCase_ : Optional[Any] = num_channels UpperCAmelCase_ : int = embed_dim UpperCAmelCase_ : Union[str, Any] = depths UpperCAmelCase_ : List[str] = num_heads UpperCAmelCase_ : int = window_size UpperCAmelCase_ : List[str] = mlp_ratio UpperCAmelCase_ : Tuple = qkv_bias UpperCAmelCase_ : Tuple = hidden_dropout_prob UpperCAmelCase_ : str = attention_probs_dropout_prob UpperCAmelCase_ : Tuple = drop_path_rate UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : int = use_absolute_embeddings UpperCAmelCase_ : Any = patch_norm UpperCAmelCase_ : Optional[int] = layer_norm_eps UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Dict = scope UpperCAmelCase_ : int = use_labels UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[str] = encoder_stride def A__ ( self: Any ) -> int: UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : List[Any] = None if self.use_labels: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : str = self.get_config() return config, pixel_values, labels def A__ ( self: List[Any] ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act 
,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str: UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int: UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : str = 1 UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : int = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int: UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def A__ ( self: str ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Tuple = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) A__ : Optional[Any] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) A__ : List[Any] = False A__ : Tuple = False A__ : int = False A__ : Union[str, Any] = False def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : Any = SwinvaModelTester(self ) UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ) def A__ ( self: Optional[int] ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self: Any ) -> Dict: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*lowerCamelCase_ ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def A__ ( self: int ) -> Dict: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def A__ ( self: Tuple ) -> List[str]: pass def A__ ( self: str ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase_ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ ) UpperCAmelCase_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : int = [*signature.parameters.keys()] UpperCAmelCase_ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Any = True for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Union[str, Any] = False UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Optional[Any] = outputs.attentions UpperCAmelCase_ : List[str] = len(self.model_tester.depths ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase_ : str = True UpperCAmelCase_ : Optional[Any] = config.window_size**2 UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : List[Any] = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ ) # Check attention is always last and order is fine UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : List[Any] = True UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) if hasattr(self.model_tester ,"""num_hidden_states_types""" ): UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states UpperCAmelCase_ : List[str] = 2 self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) ) UpperCAmelCase_ : Any = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, 
window_size_squared] ,) def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]: UpperCAmelCase_ : str = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : List[str] = outputs.hidden_states UpperCAmelCase_ : Optional[Any] = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # Swinv2 has a different seq_length UpperCAmelCase_ : Optional[Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape UpperCAmelCase_ : Optional[Any] = ( reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def A__ ( self: Any ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCAmelCase_ : Any = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : str = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Union[str, Any] = 3 UpperCAmelCase_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : List[str] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : List[str] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) def A__ ( self: Optional[int] ) -> str: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def A__ ( self: str ) -> Tuple: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__ ( self: Any ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def A__ ( self: Dict ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def A__ ( self: str ) -> List[Any]: UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( lowerCamelCase_ ) UpperCAmelCase_ : Any = self.default_image_processor UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ) # verify the logits UpperCAmelCase_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
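# A quick, self-contained check of the sequence-geometry formulas the Swinv2
# model test above asserts against. This is an illustrative sketch only; it
# restates the tester defaults (image_size=32, patch_size=2, depths=[1, 2, 1],
# embed_dim=16) and is not part of the dataset row.
image_size, patch_size = 32, 2
depths, embed_dim = [1, 2, 1], 16

num_patches = (image_size // patch_size) ** 2        # 16 x 16 = 256 tokens
num_merges = len(depths) - 1                         # two patch-merging stages
expected_seq_len = num_patches // (4 ** num_merges)  # 256 // 16 = 16
expected_dim = embed_dim * 2 ** num_merges           # 16 * 4 = 64

# Each patch-merging stage quarters the token count and doubles the width,
# which is exactly the (batch, 16, 64) last_hidden_state shape tested above.
assert (expected_seq_len, expected_dim) == (16, 64)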
345
1
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin UpperCamelCase_ = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class _snake_case : '''simple docstring''' def __init__( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: Optional[Any]=13 ,lowerCamelCase_: Dict=7 ,lowerCamelCase_: str=14 ,lowerCamelCase_: Optional[Any]=10 ,lowerCamelCase_: Tuple=19 ,lowerCamelCase_: List[Any]=5 ,lowerCamelCase_: Tuple=4 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Tuple=16 ,lowerCamelCase_: Tuple=2 ,lowerCamelCase_: Optional[int]=4 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Dict=[1, 2, 3, 4, 5] ,lowerCamelCase_: Union[str, Any]=25 ,lowerCamelCase_: Any=5 ,) -> str: UpperCAmelCase_ : Union[str, Any] = d_model UpperCAmelCase_ : Any = parent UpperCAmelCase_ : List[Any] = batch_size UpperCAmelCase_ : Union[str, Any] = prediction_length UpperCAmelCase_ : str = context_length UpperCAmelCase_ : List[Any] = cardinality UpperCAmelCase_ : Dict = num_time_features UpperCAmelCase_ : Any = lags_sequence UpperCAmelCase_ : Any = embedding_dimension UpperCAmelCase_ : Union[str, Any] = is_training UpperCAmelCase_ : List[str] = hidden_size UpperCAmelCase_ : int = num_hidden_layers UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : Any = intermediate_size UpperCAmelCase_ : List[Any] = hidden_act UpperCAmelCase_ : List[Any] = hidden_dropout_prob UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : Any = context_length UpperCAmelCase_ : str = prediction_length + label_length UpperCAmelCase_ : Any = label_length UpperCAmelCase_ : Union[str, Any] = moving_average UpperCAmelCase_ : Dict = autocorrelation_factor def A__ ( self: List[Any] ) -> Union[str, Any]: return AutoformerConfig( d_model=self.d_model ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,prediction_length=self.prediction_length ,context_length=self.context_length ,label_length=self.label_length ,lags_sequence=self.lags_sequence ,num_time_features=self.num_time_features ,num_static_categorical_features=1 ,cardinality=[self.cardinality] ,embedding_dimension=[self.embedding_dimension] ,moving_average=self.moving_average ,) def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ) -> Any: UpperCAmelCase_ : Any = config.context_length + max(config.lags_sequence ) UpperCAmelCase_ : Any = ids_tensor([self.batch_size, 1] ,config.cardinality[0] ) UpperCAmelCase_ : int = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, _past_length] ) UpperCAmelCase_ : 
Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, config.prediction_length] ) UpperCAmelCase_ : Dict = { """past_values""": past_values, """static_categorical_features""": static_categorical_features, """past_time_features""": past_time_features, """past_observed_mask""": past_observed_mask, """future_time_features""": future_time_features, """future_values""": future_values, } return inputs_dict def A__ ( self: List[Any] ) -> int: UpperCAmelCase_ : Union[str, Any] = self.get_config() UpperCAmelCase_ : Any = self.prepare_autoformer_inputs_dict(lowerCamelCase_ ) return config, inputs_dict def A__ ( self: List[Any] ) -> Union[str, Any]: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def A__ ( self: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : str = AutoformerModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).eval() UpperCAmelCase_ : List[str] = model(**lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = outputs.encoder_last_hidden_state UpperCAmelCase_ : Dict = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : Optional[Any] = model.get_encoder() encoder.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : Any = AutoformerEncoder.from_pretrained(lowerCamelCase_ ).to(lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model.create_network_inputs(**lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) UpperCAmelCase_ : Dict = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) ,dim=-1 ,) UpperCAmelCase_ : Tuple = encoder(inputs_embeds=lowerCamelCase_ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) UpperCAmelCase_ : str = ( torch.mean(transformer_inputs[:, : config.context_length, ...] 
,dim=1 ) .unsqueeze(1 ) .repeat(1 ,config.prediction_length ,1 ) ) UpperCAmelCase_ : int = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] ,device=enc_input.device ,) UpperCAmelCase_ : List[str] = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) ,dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) ,dim=-1 ,) UpperCAmelCase_ : List[str] = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) ,dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) ,dim=-1 ,) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : Optional[Any] = model.get_decoder() decoder.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = AutoformerDecoder.from_pretrained(lowerCamelCase_ ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = decoder( trend=lowerCamelCase_ ,inputs_embeds=lowerCamelCase_ ,encoder_hidden_states=lowerCamelCase_ ,)[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Union[str, Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () A__ : List[Any] = (AutoformerForPrediction,) if is_torch_available() else () A__ : List[Any] = {"feature-extraction": AutoformerModel} if is_torch_available() else {} A__ : Union[str, Any] = False A__ : int = False A__ : Optional[Any] = False A__ : Tuple = False A__ : Optional[int] = False A__ : Any = False def A__ ( self: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = AutoformerModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,has_text_modality=lowerCamelCase_ ) def A__ ( self: List[Any] ) -> Optional[Any]: self.config_tester.run_common_tests() def A__ ( self: Optional[Any] ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_class.from_pretrained(lowerCamelCase_ ,output_loading_info=lowerCamelCase_ ) self.assertEqual(info["""missing_keys"""] ,[] ) def A__ ( self: Tuple ) -> Union[str, Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase_ ) @unittest.skip(reason="""Model has no tokens embeddings""" ) def A__ ( self: Tuple ) -> Any: pass def A__ ( self: Dict ) -> Any: UpperCAmelCase_ : Optional[int] = inspect.signature(getattr(lowerCamelCase_ ,"""forward""" ) ) # The main input is the name of the argument after `self` UpperCAmelCase_ : Dict = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name ,lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : List[str] = model_class(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Optional[int] = [*signature.parameters.keys()] UpperCAmelCase_ : Tuple = [ """past_values""", 
"""past_time_features""", """past_observed_mask""", """static_categorical_features""", """static_real_features""", """future_values""", """future_time_features""", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("""future_observed_mask""" ) expected_arg_names.extend( [ """decoder_attention_mask""", """head_mask""", """decoder_head_mask""", """cross_attn_head_mask""", """encoder_outputs""", """past_key_values""", """output_hidden_states""", """output_attentions""", """use_cache""", """return_dict""", ] ) self.assertListEqual(arg_names[: len(lowerCamelCase_ )] ,lowerCamelCase_ ) def A__ ( self: List[Any] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = True UpperCAmelCase_ : Union[str, Any] = getattr(self.model_tester ,"""seq_length""" ,lowerCamelCase_ ) UpperCAmelCase_ : Dict = getattr(self.model_tester ,"""decoder_seq_length""" ,lowerCamelCase_ ) UpperCAmelCase_ : Dict = getattr(self.model_tester ,"""encoder_seq_length""" ,lowerCamelCase_ ) UpperCAmelCase_ : Tuple = getattr(self.model_tester ,"""d_model""" ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = getattr(self.model_tester ,"""num_attention_heads""" ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = d_model // num_attention_heads for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[int] = True UpperCAmelCase_ : str = False UpperCAmelCase_ : str = True UpperCAmelCase_ : str = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Union[str, Any] = outputs.encoder_attentions self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,) UpperCAmelCase_ : Any = len(lowerCamelCase_ ) UpperCAmelCase_ : Dict = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) # decoder attentions UpperCAmelCase_ : List[Any] = outputs.decoder_attentions self.assertIsInstance(lowerCamelCase_ ,(list, tuple) ) self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,) # cross attentions UpperCAmelCase_ : List[Any] = outputs.cross_attentions self.assertIsInstance(lowerCamelCase_ ,(list, tuple) ) self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, 
decoder_seq_length, dim] ,) # Check attention is always last and order is fine UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Dict = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) self.assertEqual(out_len + 2 ,len(lowerCamelCase_ ) ) UpperCAmelCase_ : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,) @is_flaky() def A__ ( self: str ) -> Optional[int]: super().test_retain_grad_hidden_states_attentions() def lowerCamelCase_ ( _a : Optional[int]="train-batch.pt" ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_a , repo_type="""dataset""" ) UpperCAmelCase_ : Any = torch.load(_a , map_location=_a ) return batch @require_torch @slow class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = prepare_batch() with torch.no_grad(): UpperCAmelCase_ : str = model( past_values=batch["""past_values"""] ,past_time_features=batch["""past_time_features"""] ,past_observed_mask=batch["""past_observed_mask"""] ,static_categorical_features=batch["""static_categorical_features"""] ,future_values=batch["""future_values"""] ,future_time_features=batch["""future_time_features"""] ,)[0] UpperCAmelCase_ : Optional[Any] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Dict = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] ,device=lowerCamelCase_ ) self.assertTrue(torch.allclose(output[0, :3, :3] ,lowerCamelCase_ ,atol=lowerCamelCase_ ) ) def A__ ( self: Tuple ) -> List[str]: UpperCAmelCase_ : List[str] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): UpperCAmelCase_ : List[Any] = model( past_values=batch["""past_values"""] ,past_time_features=batch["""past_time_features"""] ,past_observed_mask=batch["""past_observed_mask"""] ,static_categorical_features=batch["""static_categorical_features"""] ,).encoder_last_hidden_state UpperCAmelCase_ : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Tuple = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] ,device=lowerCamelCase_ ) self.assertTrue(torch.allclose(output[0, :3, :3] ,lowerCamelCase_ ,atol=lowerCamelCase_ ) ) def A__ ( self: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : Any = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Any = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): UpperCAmelCase_ : List[str] = model.generate( 
static_categorical_features=batch["""static_categorical_features"""] ,past_time_features=batch["""past_time_features"""] ,past_values=batch["""past_values"""] ,future_time_features=batch["""future_time_features"""] ,past_observed_mask=batch["""past_observed_mask"""] ,) UpperCAmelCase_ : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape ,lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] ,device=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] ,lowerCamelCase_ ,rtol=1e-1 ) )
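# The Autoformer tester above sizes its "past" tensors as
# config.context_length + max(config.lags_sequence). A minimal sketch of that
# rule, assuming the tester defaults (context_length=14,
# lags_sequence=[1, 2, 3, 4, 5]); values are restated here for illustration.
context_length = 14
lags_sequence = [1, 2, 3, 4, 5]

# The past window must cover the context plus the largest lag, so every
# lagged subsequence can be sliced without running off the front of the data.
past_length = context_length + max(lags_sequence)
assert past_length == 19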
345
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED UpperCamelCase_ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } UpperCamelCase_ = { '''allenai/led-base-16384''': 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : int = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCAmelCase_ : Dict = bs[:] UpperCAmelCase_ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(_a ) cs.append(2**8 + n ) n += 1 UpperCAmelCase_ : Any = [chr(_a ) for n in cs] return dict(zip(_a , _a ) ) def lowerCamelCase_ ( _a : List[str] ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = set() UpperCAmelCase_ : List[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase_ : Optional[int] = char return pairs class _snake_case ( __snake_case ): '''simple docstring''' A__ : str = VOCAB_FILES_NAMES A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Optional[int] = ["input_ids", "attention_mask"] def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any: UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,) with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle: UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : Any = errors # how to handle errors in decoding UpperCAmelCase_ : int = bytes_to_unicode() UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle: UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1] UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges] UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) ) UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def A__ ( self: List[str] ) -> List[str]: return len(self.encoder ) def A__ ( self: Any ) -> Union[str, Any]: return dict(self.encoder ,**self.added_tokens_encoder ) def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]: if token in self.cache: return self.cache[token] UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : List[str] = 0 while i < len(lowerCamelCase_ ): try: UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase_ : Union[str, Any] = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ ) UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = word return word def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]: UpperCAmelCase_ : str = [] for token in re.findall(self.pat ,lowerCamelCase_ ): UpperCAmelCase_ : List[Any] = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]: return 
self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) ) def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]: return self.decoder.get(lowerCamelCase_ ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]: UpperCAmelCase_ : str = """""".join(lowerCamelCase_ ) UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : List[Any] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase_ : List[str] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCAmelCase_ : str = 0 with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) UpperCAmelCase_ : Tuple = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] UpperCAmelCase_ : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Optional[Any] = [self.sep_token_id] UpperCAmelCase_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCAmelCase_ : Dict = """ """ + text return (text, kwargs) def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: 
Optional[bool] = None ,) -> dict: UpperCAmelCase_ : Optional[int] = super()._pad( encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,) # Load from model defaults if return_attention_mask is None: UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCAmelCase_ : str = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
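# A minimal sketch of the padding rule the `_pad` override above applies to
# `global_attention_mask`. The helper name is hypothetical and exists only for
# illustration; `-1` is the pad value because `0` already means "local
# attention" rather than "do not attend".
def pad_global_attention_mask(mask, target_len, padding_side="right"):
    # Pad with -1 on whichever side the tokenizer pads on.
    diff = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * diff
    return [-1] * diff + mask

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0, 0], 5, "left") == [-1, -1, 1, 0, 0]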
345
1
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch UpperCamelCase_ = logging.get_logger(__name__) class _snake_case ( __snake_case ): '''simple docstring''' A__ : List[Any] = ["pixel_values"] def __init__( self: List[str] ,lowerCamelCase_: bool = True ,lowerCamelCase_: Optional[Dict[str, int]] = None ,lowerCamelCase_: PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase_: bool = True ,lowerCamelCase_: Dict[str, int] = None ,lowerCamelCase_: bool = True ,lowerCamelCase_: Union[int, float] = 1 / 255 ,lowerCamelCase_: bool = True ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,**lowerCamelCase_: List[str] ,) -> None: super().__init__(**lowerCamelCase_ ) UpperCAmelCase_ : List[str] = size if size is not None else {"""shortest_edge""": 256} UpperCAmelCase_ : List[Any] = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ ) UpperCAmelCase_ : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} UpperCAmelCase_ : Dict = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" ) UpperCAmelCase_ : List[Any] = do_resize UpperCAmelCase_ : Dict = size UpperCAmelCase_ : Optional[int] = resample UpperCAmelCase_ : str = do_center_crop UpperCAmelCase_ : Union[str, Any] = crop_size UpperCAmelCase_ : Optional[Any] = do_rescale UpperCAmelCase_ : int = rescale_factor UpperCAmelCase_ : Union[str, Any] = do_normalize UpperCAmelCase_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def A__ ( self: Tuple ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Dict[str, int] ,lowerCamelCase_: PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: List[str] ,) -> np.ndarray: UpperCAmelCase_ : Optional[int] = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) UpperCAmelCase_ : List[Any] = get_resize_output_image_size(lowerCamelCase_ ,size=size["""shortest_edge"""] ,default_to_square=lowerCamelCase_ ) return resize(lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ ) def A__ ( self: int ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Dict[str, int] ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: List[Any] ,) -> np.ndarray: UpperCAmelCase_ : Optional[int] = get_size_dict(lowerCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}''' ) return center_crop(lowerCamelCase_ ,size=(size["""height"""], size["""width"""]) ,data_format=lowerCamelCase_ ,**lowerCamelCase_ ) def A__ ( self: Dict ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: float ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: int ) -> np.ndarray: return rescale(lowerCamelCase_ ,scale=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ ) def A__ ( self: Optional[Any] ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Union[float, List[float]] ,lowerCamelCase_: Union[float, List[float]] ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: int ,) -> np.ndarray: return normalize(lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ ) def A__ ( self: Dict ,lowerCamelCase_: ImageInput ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Dict[str, int] = None ,lowerCamelCase_: PILImageResampling = None ,lowerCamelCase_: bool = None ,lowerCamelCase_: Dict[str, int] = None ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Optional[float] = None ,lowerCamelCase_: Optional[bool] = None ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,lowerCamelCase_: Optional[Union[str, TensorType]] = None ,lowerCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST ,**lowerCamelCase_: Dict ,) -> int: UpperCAmelCase_ : str = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : List[Any] = size if size is not None else self.size UpperCAmelCase_ : Dict = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = resample if resample is not None else self.resample UpperCAmelCase_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : str = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : List[str] = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" ) UpperCAmelCase_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : str = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : List[str] = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : List[Any] = image_std if image_std is not None else self.image_std UpperCAmelCase_ : List[Any] = make_list_of_images(lowerCamelCase_ ) if not valid_images(lowerCamelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
UpperCAmelCase_ : Dict = [to_numpy_array(lowerCamelCase_ ) for image in images] if do_resize: UpperCAmelCase_ : List[Any] = [self.resize(image=lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ ) for image in images] if do_center_crop: UpperCAmelCase_ : Optional[Any] = [self.center_crop(image=lowerCamelCase_ ,size=lowerCamelCase_ ) for image in images] if do_rescale: UpperCAmelCase_ : str = [self.rescale(image=lowerCamelCase_ ,scale=lowerCamelCase_ ) for image in images] if do_normalize: UpperCAmelCase_ : Optional[Any] = [self.normalize(image=lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ) for image in images] UpperCAmelCase_ : int = [to_channel_dimension_format(lowerCamelCase_ ,lowerCamelCase_ ) for image in images] UpperCAmelCase_ : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=lowerCamelCase_ ,tensor_type=lowerCamelCase_ ) def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Tuple] = None ) -> Optional[int]: UpperCAmelCase_ : List[str] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCamelCase_ ) != len(lowerCamelCase_ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(lowerCamelCase_ ): UpperCAmelCase_ : Optional[Any] = target_sizes.numpy() UpperCAmelCase_ : List[str] = [] for idx in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : str = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCamelCase_ ) else: UpperCAmelCase_ : Union[str, Any] = logits.argmax(dim=1 ) UpperCAmelCase_ : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
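# A rough numpy-only sketch of the last two preprocessing steps the image
# processor above enforces (rescale by 1/255, then per-channel normalize).
# Shapes, the function name, and the example values are assumptions for
# illustration, not the processor's actual API.
import numpy as np

def rescale_and_normalize(image, mean, std, scale=1 / 255):
    # `image` is assumed to be an HxWxC uint8 array after resize + center crop.
    image = image.astype(np.float32) * scale
    return (image - np.array(mean)) / np.array(std)

out = rescale_and_normalize(np.zeros((224, 224, 3), np.uint8), [0.5] * 3, [0.5] * 3)
assert out.shape == (224, 224, 3)  # zero pixels map to -1 after normalization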
345
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Union[str, Any] ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Optional[Any] = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ ) return image @property def A__ ( self: List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : int = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) return model @property def A__ ( self: str ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) return model @property def A__ ( self: Optional[int] ) -> int: torch.manual_seed(0 ) UpperCAmelCase_ : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModel(lowerCamelCase_ ) @property def A__ ( self: Tuple ) -> Tuple: def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ): class _snake_case : '''simple docstring''' def __init__( self: List[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = torch.ones([0] ) def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int: self.pixel_values.to(lowerCamelCase_ ) return self return Out() return extract def A__ ( self: Union[str, Any] ) -> Tuple: UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : int = self.dummy_cond_unet UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : str = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : str = 
torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : List[Any] = output.images UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Dict = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : int = image[0, -3:, -3:, -1] UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.dummy_cond_unet UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: str ) -> Dict: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert isinstance(pipe.scheduler ,lowerCamelCase_ ) assert pipe.safety_checker is None UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ ) # sanity 
check that the pipeline still works assert pipe.safety_checker is None UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : Tuple = self.dummy_cond_unet UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 UpperCAmelCase_ : Optional[Any] = unet.half() UpperCAmelCase_ : Optional[int] = vae.half() UpperCAmelCase_ : int = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ) -> List[Any]: UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : str = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) UpperCAmelCase_ : Optional[int] = 4003660346 UpperCAmelCase_ : int = 7 # without safety guidance (sld_guidance_scale = 0) UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Optional[int] = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Any = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) 
UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity""" UpperCAmelCase_ : List[Any] = 2734971755 UpperCAmelCase_ : Optional[Any] = 7 UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.""" """ leyendecker""" ) UpperCAmelCase_ : Optional[Any] = 1044355234 UpperCAmelCase_ : List[str] = 12 UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Any = output.images UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : List[str] = output.images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
345
1
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class _snake_case : '''simple docstring''' def __init__( self: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any]=13 ,lowerCamelCase_: Union[str, Any]=7 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=99 ,lowerCamelCase_: str=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: int=4 ,lowerCamelCase_: Dict=37 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Optional[Any]=0.1 ,lowerCamelCase_: List[Any]=0.1 ,lowerCamelCase_: Optional[Any]=128 ,lowerCamelCase_: List[str]=32 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Any=2 ,lowerCamelCase_: int=0.0_2 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Dict=4 ,lowerCamelCase_: List[Any]=None ,) -> Any: UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : str = batch_size UpperCAmelCase_ : str = seq_length UpperCAmelCase_ : List[str] = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : Union[str, Any] = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Optional[Any] = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : Any = intermediate_size UpperCAmelCase_ : Optional[int] = hidden_act UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : Any = attention_probs_dropout_prob UpperCAmelCase_ : Optional[Any] = max_position_embeddings UpperCAmelCase_ : List[str] = type_vocab_size UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Optional[int] = initializer_range UpperCAmelCase_ : Optional[int] = num_labels UpperCAmelCase_ : Dict = num_choices UpperCAmelCase_ : int = scope def A__ ( self: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : Any = None if self.use_input_mask: UpperCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Dict = None if self.use_token_type_ids: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : List[str] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, 
input_mask, sequence_labels, token_labels, choice_labels def A__ ( self: Optional[int] ) -> str: return NezhaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,) def A__ ( self: Any ) -> Tuple: ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = self.prepare_config_and_inputs() UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def A__ ( self: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: str ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = NezhaModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ) UpperCAmelCase_ : Any = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def A__ ( self: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[int] ,) -> str: UpperCAmelCase_ : Union[str, Any] = True UpperCAmelCase_ : str = NezhaModel(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,encoder_hidden_states=lowerCamelCase_ ,encoder_attention_mask=lowerCamelCase_ ,) UpperCAmelCase_ : Optional[Any] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,encoder_hidden_states=lowerCamelCase_ ,) UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def A__ ( self: str ,lowerCamelCase_: List[str] ,lowerCamelCase_: int ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: int ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = NezhaForMaskedLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ 
,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = NezhaForNextSentencePrediction(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def A__ ( self: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[Any]: UpperCAmelCase_ : Tuple = NezhaForPreTraining(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def A__ ( self: Optional[int] ,lowerCamelCase_: int ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ) -> Tuple: UpperCAmelCase_ : List[Any] = NezhaForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ) -> Tuple: UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : List[Any] = NezhaForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : str = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def A__ ( self: Any ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[int] ) -> Dict: UpperCAmelCase_ : Optional[int] = self.num_labels UpperCAmelCase_ : Optional[Any] = NezhaForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def A__ ( self: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: 
Optional[Any] ,lowerCamelCase_: List[str] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.num_choices UpperCAmelCase_ : Optional[int] = NezhaForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : int = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def A__ ( self: Any ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : List[str] = config_and_inputs UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Dict = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) A__ : List[str] = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) A__ : List[str] = True def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: List[Any]=False ) -> Any: UpperCAmelCase_ : Tuple = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ ) if return_labels: if model_class in get_values(lowerCamelCase_ ): UpperCAmelCase_ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase_ ) UpperCAmelCase_ : str = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ ) return inputs_dict def A__ ( self: Dict ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = NezhaModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 ) def A__ ( self: Optional[int] ) -> Optional[Any]: self.config_tester.run_common_tests() def A__ ( self: Dict ) -> List[str]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Dict: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase_ ) def A__ ( self: Dict ) -> int: # This regression test was failing with PyTorch < 1.3 ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase_ : str = None 
self.model_tester.create_and_check_model_as_decoder( lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,) def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ ) def A__ ( self: Tuple ) -> Union[str, Any]: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ ) def A__ ( self: Tuple ) -> Dict: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCamelCase_ ) def A__ ( self: Optional[int] ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ ) def A__ ( self: str ) -> Any: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ ) def A__ ( self: List[str] ) -> Tuple: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ ) def A__ ( self: int ) -> List[str]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ ) @slow def A__ ( self: Tuple ) -> Union[str, Any]: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Union[str, Any] = NezhaModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @slow @require_torch_gpu def A__ ( self: Optional[int] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Tuple = model_class(config=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = torch.jit.trace( lowerCamelCase_ ,(inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCamelCase_ ,os.path.join(lowerCamelCase_ ,"""bert.pt""" ) ) UpperCAmelCase_ : List[str] = torch.jit.load(os.path.join(lowerCamelCase_ ,"""bert.pt""" ) ,map_location=lowerCamelCase_ ) loaded(inputs_dict["""input_ids"""].to(lowerCamelCase_ ) ,inputs_dict["""attention_mask"""].to(lowerCamelCase_ ) ) @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @slow def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[Any] = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) UpperCAmelCase_ : int = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase_ : str = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ )[0] UpperCAmelCase_ : Optional[Any] = torch.Size((1, 6, 768) ) self.assertEqual(output.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowerCamelCase_ ,atol=1e-4 ) ) @slow def A__ ( self: str ) -> Dict: UpperCAmelCase_ : Union[str, Any] = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) UpperCAmelCase_ : Dict = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase_ : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase_ : int = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ )[0] UpperCAmelCase_ : List[Any] = torch.Size((1, 6, 21128) ) self.assertEqual(output.shape ,lowerCamelCase_ ) UpperCAmelCase_ : str = torch.tensor( [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowerCamelCase_ ,atol=1e-4 ) )
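# Minimal inference sketch mirroring the Nezha integration test above; the checkpoint
# name comes from the test and the token ids are illustrative.
import torch
from transformers import NezhaModel

model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
print(last_hidden_state.shape)  # the test expects torch.Size([1, 6, 768])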
345
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _snake_case : '''simple docstring''' def __init__( self: Optional[int] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Tuple=13 ,lowerCamelCase_: int=7 ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Dict=True ,lowerCamelCase_: str=True ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: int=99 ,lowerCamelCase_: List[str]=64 ,lowerCamelCase_: Tuple=32 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: str=4 ,lowerCamelCase_: str=37 ,lowerCamelCase_: Union[str, Any]="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[str]=512 ,lowerCamelCase_: Dict=16 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: List[str]=0.0_2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: Union[str, Any]=4 ,lowerCamelCase_: str=None ,) -> List[str]: UpperCAmelCase_ : Any = parent UpperCAmelCase_ : List[Any] = batch_size UpperCAmelCase_ : Union[str, Any] = seq_length UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Dict = use_input_mask UpperCAmelCase_ : Any = use_token_type_ids UpperCAmelCase_ : Tuple = use_labels UpperCAmelCase_ : List[Any] = vocab_size UpperCAmelCase_ : str = hidden_size UpperCAmelCase_ : List[str] = embedding_size UpperCAmelCase_ : List[Any] = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : Tuple = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : List[str] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : List[str] = type_vocab_size UpperCAmelCase_ : Any = type_sequence_label_size UpperCAmelCase_ : Optional[Any] = initializer_range UpperCAmelCase_ : Optional[int] = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : List[str] = scope def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : List[str] = None if self.use_input_mask: UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Dict = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) UpperCAmelCase_ : int = None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase_ : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def 
A__ ( self: Any ) -> Dict: return MobileBertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,) def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> int: UpperCAmelCase_ : Any = MobileBertModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def A__ ( self: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ) -> int: UpperCAmelCase_ : Union[str, Any] = MobileBertForMaskedLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: int ) -> int: UpperCAmelCase_ : List[Any] = MobileBertForNextSentencePrediction(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def A__ ( self: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> Optional[Any]: UpperCAmelCase_ : Tuple = MobileBertForPreTraining(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,next_sentence_label=lowerCamelCase_ ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def A__ ( self: Any ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[Any] = MobileBertForQuestionAnswering(config=lowerCamelCase_ ) 
model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,start_positions=lowerCamelCase_ ,end_positions=lowerCamelCase_ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def A__ ( self: List[str] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> str: UpperCAmelCase_ : Optional[Any] = self.num_labels UpperCAmelCase_ : Union[str, Any] = MobileBertForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Any: UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def A__ ( self: Tuple ,lowerCamelCase_: str ,lowerCamelCase_: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = self.num_choices UpperCAmelCase_ : Tuple = MobileBertForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase_ : Optional[int] = model( lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,token_type_ids=lowerCamelCase_ ,labels=lowerCamelCase_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def A__ ( self: List[str] ) -> str: UpperCAmelCase_ : str = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Union[str, Any] = config_and_inputs UpperCAmelCase_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Dict = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) A__ : List[str] = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, 
"question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) A__ : List[str] = True def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple ,lowerCamelCase_: int=False ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ ) if return_labels: if model_class in get_values(lowerCamelCase_ ): UpperCAmelCase_ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase_ ) return inputs_dict def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : List[str] = MobileBertModelTester(self ) UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=37 ) def A__ ( self: Optional[Any] ) -> List[Any]: self.config_tester.run_common_tests() def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ ) def A__ ( self: Optional[int] ) -> Optional[int]: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Tuple: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ ) def A__ ( self: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ ) def A__ ( self: Optional[int] ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ ) def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( _a : Union[str, Any] ): '''simple docstring''' return torch.tensor( _a , dtype=torch.long , device=_a , ) UpperCamelCase_ = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _snake_case ( unittest.TestCase ): '''simple docstring''' @slow def A__ ( self: List[Any] ) -> str: UpperCAmelCase_ : Any = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : str = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )[0] UpperCAmelCase_ : Union[str, Any] = torch.Size((1, 9, 512) ) self.assertEqual(output.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Tuple = 
torch.tensor( [ [ [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05], [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00], [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01], ] ] ,device=lowerCamelCase_ ,) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 absolute difference, so it is not a good idea to measure using addition. # Here, we instead divide the expected result by the actual result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) UpperCAmelCase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
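# Sketch of the ratio-based comparison the comment above describes: when outputs span
# many orders of magnitude, check that expected / actual stays within 1 +/- TOLERANCE
# instead of using an absolute difference. The helper name is hypothetical.
import torch

def assert_close_relative(expected: torch.Tensor, actual: torch.Tensor, tolerance: float = 1e-3) -> None:
    ratio = expected / actual
    lower_bound = torch.all(ratio >= 1 - tolerance)
    upper_bound = torch.all(ratio <= 1 + tolerance)
    assert bool(lower_bound and upper_bound), "relative deviation exceeds tolerance"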
345
1
# Logistic Regression from scratch # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def lowerCamelCase_ ( _a : Union[str, Any] ): '''simple docstring''' return 1 / (1 + np.exp(-z )) def lowerCamelCase_ ( _a : Any , _a : List[str] ): '''simple docstring''' return (-y * np.log(_a ) - (1 - y) * np.log(1 - h )).mean() def lowerCamelCase_ ( _a : List[Any] , _a : Dict , _a : Optional[int] ): '''simple docstring''' UpperCAmelCase_ : str = np.dot(_a , _a ) return np.sum(y * scores - np.log(1 + np.exp(_a ) ) ) def lowerCamelCase_ ( _a : Any , _a : Optional[Any] , _a : int , _a : Dict=7_0000 ): '''simple docstring''' UpperCAmelCase_ : List[str] = np.zeros(x.shape[1] ) for iterations in range(_a ): UpperCAmelCase_ : int = np.dot(_a , _a ) UpperCAmelCase_ : List[Any] = sigmoid_function(_a ) UpperCAmelCase_ : Union[str, Any] = np.dot(x.T , h - y ) / y.size UpperCAmelCase_ : List[Any] = theta - alpha * gradient # updating the weights UpperCAmelCase_ : List[str] = np.dot(_a , _a ) UpperCAmelCase_ : List[Any] = sigmoid_function(_a ) UpperCAmelCase_ : Tuple = cost_function(_a , _a ) if iterations % 100 == 0: print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations return theta if __name__ == "__main__": UpperCamelCase_ = datasets.load_iris() UpperCamelCase_ = iris.data[:, :2] UpperCamelCase_ = (iris.target != 0) * 1 UpperCamelCase_ = 0.1 UpperCamelCase_ = logistic_reg(alpha, x, y, max_iterations=70000) print('''theta: ''', theta) # printing the theta i.e. our weights vector def lowerCamelCase_ ( _a : List[Any] ): '''simple docstring''' return sigmoid_function( np.dot(_a , _a ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''') ((UpperCamelCase_) ,(UpperCamelCase_)) = (x[:, 0].min(), x[:, 0].max()) ((UpperCamelCase_) ,(UpperCamelCase_)) = (x[:, 1].min(), x[:, 1].max()) ((UpperCamelCase_) ,(UpperCamelCase_)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) UpperCamelCase_ = np.c_[xxa.ravel(), xxa.ravel()] UpperCamelCase_ = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''') plt.legend() plt.show()
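# A small follow-up sketch (not part of the original script): threshold the
# probabilities from predict_prob at 0.5 to obtain hard class labels and report
# training accuracy, assuming the iris features x, labels y, and predict_prob
# defined above are in scope.
predictions = (predict_prob(x) >= 0.5).astype(int)
print("training accuracy:", (predictions == y).mean())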
345
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: str ) -> int: UpperCAmelCase_ : List[Any] = """ylacombe/bark-small""" UpperCAmelCase_ : Tuple = tempfile.mkdtemp() UpperCAmelCase_ : Union[str, Any] = """en_speaker_1""" UpperCAmelCase_ : Optional[Any] = """This is a test string""" UpperCAmelCase_ : int = """speaker_embeddings_path.json""" UpperCAmelCase_ : Any = """speaker_embeddings""" def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]: return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ ) def A__ ( self: str ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def A__ ( self: List[Any] ) -> int: UpperCAmelCase_ : int = self.get_tokenizer() UpperCAmelCase_ : Tuple = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) @slow def A__ ( self: List[Any] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) processor.save_pretrained( self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,) UpperCAmelCase_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained( self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,) UpperCAmelCase_ : Optional[int] = 35 UpperCAmelCase_ : Optional[int] = 2 UpperCAmelCase_ : Dict = 8 UpperCAmelCase_ : Optional[int] = { """semantic_prompt""": np.ones(lowerCamelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset UpperCAmelCase_ : str = processor(text=self.input_string ,voice_preset=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() ) # test loading voice preset from npz file UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,"""file.npz""" ) np.savez(lowerCamelCase_ ,**lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=lowerCamelCase_ ) UpperCAmelCase_ : int = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() ) # test loading voice preset from the hub UpperCAmelCase_ : Union[str, Any] = processor(text=self.input_string ,voice_preset=self.voice_preset ) def A__ ( self: Dict ) -> Tuple: UpperCAmelCase_ : Any = self.get_tokenizer() UpperCAmelCase_ : Dict = 
BarkProcessor(tokenizer=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ) UpperCAmelCase_ : str = tokenizer( self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
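# Hedged usage sketch of the processor under test: the checkpoint and voice preset
# mirror the fixtures above; the prompt text is illustrative.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
# besides the tokenized text, the output carries the loaded voice embeddings
print(sorted(inputs["history_prompt"].keys()))  # semantic/coarse/fine prompt arrays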
345
1
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) UpperCamelCase_ = logging.getLogger(__name__) @dataclass class _snake_case : '''simple docstring''' A__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) A__ : bool = field(default=__snake_case , metadata={"help": "Whether to freeze the encoder."} ) A__ : bool = field(default=__snake_case , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class _snake_case : '''simple docstring''' A__ : str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) A__ : Optional[str] = field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) A__ : Optional[int] = field( default=1_024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ : Optional[int] = field( default=128 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ : Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) A__ : Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) A__ : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) A__ : Optional[int] = field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."} ) A__ : Optional[str] = field(default=__snake_case , metadata={"help": "Source language id for translation."} ) A__ : Optional[str] = field(default=__snake_case , metadata={"help": "Target language id for translation."} ) A__ : Optional[int] = field(default=__snake_case , metadata={"help": "# num_beams to use for evaluation."} ) A__ : bool = field( default=__snake_case , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def lowerCamelCase_ ( _a : str , _a : int , _a : Optional[int] ): '''simple docstring''' logger.info(F'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(F''' {key} = {metrics[key]}''' ) save_json(_a , os.path.join(_a , F'''{split}_results.json''' ) ) def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_args_into_dataclasses() check_output_dir(_a ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , _a ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase_ : str = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(_a , _a , _a ): assert hasattr(_a , _a ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(_a , _a , getattr(_a , _a ) ) UpperCAmelCase_ : int = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=_a , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(_a , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: UpperCAmelCase_ : List[Any] = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(_a , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(_a , _a ): UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang] else: UpperCAmelCase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(_a ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) UpperCAmelCase_ : List[Any] = SeqaSeqDataset # Get datasets UpperCAmelCase_ : Optional[Any] = ( dataset_class( _a , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) UpperCAmelCase_ : Union[str, Any] = ( dataset_class( _a , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) UpperCAmelCase_ : Any = ( dataset_class( _a , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer UpperCAmelCase_ : Tuple = ( build_compute_metrics_fn(data_args.task , _a ) if training_args.predict_with_generate else None ) UpperCAmelCase_ : Dict = SeqaSeqTrainer( model=_a , args=_a , data_args=_a , train_dataset=_a , eval_dataset=_a , data_collator=SeqaSeqDataCollator( _a , _a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_a , tokenizer=_a , ) UpperCAmelCase_ : Optional[Any] = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) UpperCAmelCase_ : Dict = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) UpperCAmelCase_ : str = train_result.metrics UpperCAmelCase_ : Union[str, Any] = data_args.n_train trainer.save_model() # this also saves the tokenizer if 
trainer.is_world_process_zero(): handle_metrics("""train""" , _a , training_args.output_dir ) all_metrics.update(_a ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) UpperCAmelCase_ : Tuple = trainer.evaluate(metric_key_prefix="""val""" ) UpperCAmelCase_ : List[str] = data_args.n_val UpperCAmelCase_ : Optional[Any] = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , _a , training_args.output_dir ) all_metrics.update(_a ) if training_args.do_predict: logger.info("""*** Predict ***""" ) UpperCAmelCase_ : Tuple = trainer.predict(test_dataset=_a , metric_key_prefix="""test""" ) UpperCAmelCase_ : Tuple = test_output.metrics UpperCAmelCase_ : Any = data_args.n_test if trainer.is_world_process_zero(): UpperCAmelCase_ : Optional[int] = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , _a , training_args.output_dir ) all_metrics.update(_a ) if training_args.predict_with_generate: UpperCAmelCase_ : List[str] = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) UpperCAmelCase_ : Union[str, Any] = lmap(str.strip , _a ) write_txt_file(_a , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(_a , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def lowerCamelCase_ ( _a : Union[str, Any] ): '''simple docstring''' main() if __name__ == "__main__": main()
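# Hypothetical launch sketch: per the argument parsing above, HfArgumentParser can read
# every option from a single JSON file passed as the sole CLI argument. The file name,
# script name, and all values below are placeholders, not the project's actual config.
import json

args = {
    "model_name_or_path": "sshleifer/bart-tiny-random",
    "data_dir": "./data",
    "output_dir": "./output",
    "task": "summarization",
    "do_train": True,
    "do_eval": True,
    "predict_with_generate": True,
}
with open("train_args.json", "w") as f:
    json.dump(args, f, indent=2)
# then run: python finetune_script.py train_args.json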
345
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : str = -1 UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : Optional[int] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: Dict ) -> Optional[Any]: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] ) UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() UpperCAmelCase_ : int = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[Any] ) -> Dict: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :] UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : List[str] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: str ) -> str: # Tests that we can pass 
`decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Any = -1 UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n" UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = -1 UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 ) UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(lowerCamelCase_ ): UpperCAmelCase_ : Union[str, Any] = """""" for new_text in streamer: streamer_text += new_text
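# Outside the test harness, the same streaming pattern looks like this: run `generate` in a
# background thread and consume decoded text chunks from a `TextIteratorStreamer`. A minimal
# sketch; it assumes the tiny test checkpoint used above can be downloaded.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok("Hello", return_tensors="pt")

streamer = TextIteratorStreamer(tok, skip_prompt=True)
thread = Thread(target=model.generate, kwargs=dict(**inputs, max_new_tokens=10, streamer=streamer))
thread.start()
for piece in streamer:  # yields decoded text as soon as each chunk is ready
    print(piece, end="")
thread.join()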
345
1
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset UpperCamelCase_ = '''bert-base-cased''' UpperCamelCase_ = '''google/pegasus-xsum''' UpperCamelCase_ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] UpperCamelCase_ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] UpperCamelCase_ = '''patrickvonplaten/t5-tiny-random''' UpperCamelCase_ = '''sshleifer/bart-tiny-random''' UpperCamelCase_ = '''sshleifer/tiny-mbart''' UpperCamelCase_ = '''sshleifer/tiny-marian-en-de''' def lowerCamelCase_ ( _a : Path , _a : list ): '''simple docstring''' UpperCAmelCase_ : List[str] = """\n""".join(_a ) Path(_a ).open("""w""" ).writelines(_a ) def lowerCamelCase_ ( _a : List[str] ): '''simple docstring''' for split in ["train", "val", "test"]: _dump_articles(os.path.join(_a , F'''{split}.source''' ) , _a ) _dump_articles(os.path.join(_a , F'''{split}.target''' ) , _a ) return tmp_dir class _snake_case ( __snake_case ): '''simple docstring''' @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] ,) @slow def A__ ( self: List[Any] ,lowerCamelCase_: Tuple ) -> int: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) UpperCAmelCase_ : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES ) UpperCAmelCase_ : List[str] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES ) UpperCAmelCase_ : str = 4 UpperCAmelCase_ : List[str] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated UpperCAmelCase_ , UpperCAmelCase_ : List[str] = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error. UpperCAmelCase_ : List[str] = SeqaSeqDataset( lowerCamelCase_ ,data_dir=lowerCamelCase_ ,type_path="""train""" ,max_source_length=lowerCamelCase_ ,max_target_length=lowerCamelCase_ ,src_lang=lowerCamelCase_ ,tgt_lang=lowerCamelCase_ ,) UpperCAmelCase_ : List[Any] = DataLoader(lowerCamelCase_ ,batch_size=2 ,collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place UpperCAmelCase_ : List[str] = shift_tokens_right(batch["""labels"""] ,tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Dict: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) UpperCAmelCase_ : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES ) UpperCAmelCase_ : Optional[Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES ) UpperCAmelCase_ : int = 4 UpperCAmelCase_ : Optional[int] = LegacySeqaSeqDataset( lowerCamelCase_ ,data_dir=lowerCamelCase_ ,type_path="""train""" ,max_source_length=20 ,max_target_length=lowerCamelCase_ ,) UpperCAmelCase_ : Optional[Any] = DataLoader(lowerCamelCase_ ,batch_size=2 ,collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def A__ ( self: List[Any] ) -> List[str]: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" ) UpperCAmelCase_ : Dict = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) UpperCAmelCase_ : List[str] = tmp_dir.joinpath("""train.source""" ).open().readlines() UpperCAmelCase_ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(lowerCamelCase_ ,lowerCamelCase_ ,128 ,lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = {x.name for x in tmp_dir.iterdir()} UpperCAmelCase_ : Optional[Any] = {x.name for x in save_dir.iterdir()} UpperCAmelCase_ : Optional[Any] = save_dir.joinpath("""train.source""" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(lowerCamelCase_ ) < len(lowerCamelCase_ ) assert len(lowerCamelCase_ ) == 1 assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE ,reason="""This test requires fairseq""" ) def A__ ( self: Tuple ) -> Any: if not FAIRSEQ_AVAILABLE: return UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = self._get_dataset(max_len=64 ) UpperCAmelCase_ : Optional[Any] = 64 UpperCAmelCase_ : Tuple = ds.make_dynamic_sampler(lowerCamelCase_ ,required_batch_size_multiple=lowerCamelCase_ ) UpperCAmelCase_ : str = [len(lowerCamelCase_ ) for x in batch_sampler] assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples 
UpperCAmelCase_ : str = DataLoader(lowerCamelCase_ ,batch_sampler=lowerCamelCase_ ,collate_fn=ds.collate_fn ,num_workers=2 ) UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : Dict = [] for batch in data_loader: UpperCAmelCase_ : Optional[Any] = batch["""input_ids"""].shape UpperCAmelCase_ : List[Any] = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple UpperCAmelCase_ : str = np.product(batch["""input_ids"""].shape ) num_src_per_batch.append(lowerCamelCase_ ) if num_src_tokens > (max_tokens * 1.1): failures.append(lowerCamelCase_ ) assert num_src_per_batch[0] == max(lowerCamelCase_ ) if failures: raise AssertionError(F'''too many tokens in {len(lowerCamelCase_ )} batches''' ) def A__ ( self: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self._get_dataset(max_len=512 ) UpperCAmelCase_ : List[Any] = 2 UpperCAmelCase_ : int = ds.make_sortish_sampler(lowerCamelCase_ ,shuffle=lowerCamelCase_ ) UpperCAmelCase_ : int = DataLoader(lowerCamelCase_ ,batch_size=lowerCamelCase_ ,collate_fn=ds.collate_fn ,num_workers=2 ) UpperCAmelCase_ : Optional[int] = DataLoader(lowerCamelCase_ ,batch_size=lowerCamelCase_ ,collate_fn=ds.collate_fn ,num_workers=2 ,sampler=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = tokenizer.pad_token_id def count_pad_tokens(lowerCamelCase_: List[Any] ,lowerCamelCase_: str="input_ids" ): return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(lowerCamelCase_ ,k="""labels""" ) ) < sum(count_pad_tokens(lowerCamelCase_ ,k="""labels""" ) ) assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) def A__ ( self: Tuple ,lowerCamelCase_: int=1000 ,lowerCamelCase_: Optional[int]=128 ) -> Optional[Any]: if os.getenv("""USE_REAL_DATA""" ,lowerCamelCase_ ): UpperCAmelCase_ : str = """examples/seq2seq/wmt_en_ro""" UpperCAmelCase_ : Dict = max_len * 2 * 64 if not Path(lowerCamelCase_ ).joinpath("""train.len""" ).exists(): save_len_file(lowerCamelCase_ ,lowerCamelCase_ ) else: UpperCAmelCase_ : int = """examples/seq2seq/test_data/wmt_en_ro""" UpperCAmelCase_ : Dict = max_len * 4 save_len_file(lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : str = SeqaSeqDataset( lowerCamelCase_ ,data_dir=lowerCamelCase_ ,type_path="""train""" ,max_source_length=lowerCamelCase_ ,max_target_length=lowerCamelCase_ ,n_obs=lowerCamelCase_ ,) return ds, max_tokens, tokenizer def A__ ( self: List[str] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = self._get_dataset() UpperCAmelCase_ : Union[str, Any] = set(DistributedSortishSampler(lowerCamelCase_ ,256 ,num_replicas=2 ,rank=0 ,add_extra_examples=lowerCamelCase_ ) ) UpperCAmelCase_ : Dict = set(DistributedSortishSampler(lowerCamelCase_ ,256 ,num_replicas=2 ,rank=1 ,add_extra_examples=lowerCamelCase_ ) ) assert idsa.intersection(lowerCamelCase_ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] ,) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ ,use_fast=lowerCamelCase_ ) if tok_name == MBART_TINY: UpperCAmelCase_ : Any = SeqaSeqDataset( lowerCamelCase_ ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path="""train""" ,max_source_length=4 
,max_target_length=8 ,src_lang="""EN""" ,tgt_lang="""FR""" ,) UpperCAmelCase_ : Tuple = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: UpperCAmelCase_ : Optional[int] = SeqaSeqDataset( lowerCamelCase_ ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path="""train""" ,max_source_length=4 ,max_target_length=8 ,) UpperCAmelCase_ : Optional[int] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
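# The "sortish" sampler asserted on above cuts padding by sorting examples by length inside
# shuffled chunks, so each batch holds similarly sized sequences without making the epoch
# order fully deterministic. A standalone sketch of the idea (the chunk size of
# 4 * batch_size is an assumption, not the library's exact constant):
import random


def sortish_indices(lengths: list, batch_size: int) -> list:
    idx = list(range(len(lengths)))
    random.shuffle(idx)
    chunk = batch_size * 4
    out = []
    for start in range(0, len(idx), chunk):
        out.extend(sorted(idx[start : start + chunk], key=lambda i: lengths[i], reverse=True))
    return out


random.seed(0)
print(sortish_indices([5, 50, 7, 48, 6, 49, 8, 47], batch_size=2))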
345
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _snake_case ( unittest.TestCase ): '''simple docstring''' @property def A__ ( self: Optional[int] ) -> int: torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model @property def A__ ( self: Tuple ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = VQModel( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,) return model @property def A__ ( self: Tuple ) -> Any: torch.manual_seed(0 ) UpperCAmelCase_ : int = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModel(lowerCamelCase_ ) def A__ ( self: str ) -> Optional[Any]: UpperCAmelCase_ : str = self.dummy_uncond_unet UpperCAmelCase_ : List[Any] = DDIMScheduler() UpperCAmelCase_ : List[Any] = self.dummy_vq_model UpperCAmelCase_ : Optional[int] = LDMPipeline(unet=lowerCamelCase_ ,vqvae=lowerCamelCase_ ,scheduler=lowerCamelCase_ ) ldm.to(lowerCamelCase_ ) ldm.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = torch.manual_seed(0 ) UpperCAmelCase_ : int = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ).images UpperCAmelCase_ : List[str] = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = ldm(generator=lowerCamelCase_ ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=lowerCamelCase_ )[0] UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : str = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] ) UpperCAmelCase_ : Tuple = 1e-2 if torch_device != """mps""" else 3e-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" ) ldm.to(lowerCamelCase_ ) ldm.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = ldm(generator=lowerCamelCase_ ,num_inference_steps=5 ,output_type="""numpy""" ).images UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) UpperCAmelCase_ : int = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] ) UpperCAmelCase_ : Union[str, Any] = 1e-2 if torch_device != """mps""" else 3e-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
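# The slow test above corresponds to this user-facing call pattern; a sketch assuming the
# CompVis checkpoint can be downloaded (seed and step count taken from the test):
import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe(generator=torch.manual_seed(0), num_inference_steps=5, output_type="numpy").images[0]
print(image.shape)  # (256, 256, 3)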
345
1
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class _snake_case ( nn.Module ): '''simple docstring''' def __init__( self: Dict ,lowerCamelCase_: int = 16 ,lowerCamelCase_: int = 88 ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: int = 1 ,lowerCamelCase_: float = 0.0 ,lowerCamelCase_: int = 32 ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: str = "geglu" ,lowerCamelCase_: Optional[int] = None ,) -> Any: super().__init__() UpperCAmelCase_ : str = nn.ModuleList( [ TransformeraDModel( num_attention_heads=lowerCamelCase_ ,attention_head_dim=lowerCamelCase_ ,in_channels=lowerCamelCase_ ,num_layers=lowerCamelCase_ ,dropout=lowerCamelCase_ ,norm_num_groups=lowerCamelCase_ ,cross_attention_dim=lowerCamelCase_ ,attention_bias=lowerCamelCase_ ,sample_size=lowerCamelCase_ ,num_vector_embeds=lowerCamelCase_ ,activation_fn=lowerCamelCase_ ,num_embeds_ada_norm=lowerCamelCase_ ,) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference UpperCAmelCase_ : Any = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` UpperCAmelCase_ : Optional[Any] = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` UpperCAmelCase_ : int = [1, 0] def A__ ( self: int ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: List[str]=None ,lowerCamelCase_: Dict=None ,lowerCamelCase_: bool = True ,) -> Union[str, Any]: UpperCAmelCase_ : Any = hidden_states UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : List[str] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens UpperCAmelCase_ : Tuple = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] UpperCAmelCase_ : int = self.transformer_index_for_condition[i] UpperCAmelCase_ : List[Any] = self.transformers[transformer_index]( lowerCamelCase_ ,encoder_hidden_states=lowerCamelCase_ ,timestep=lowerCamelCase_ ,cross_attention_kwargs=lowerCamelCase_ ,return_dict=lowerCamelCase_ ,)[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] UpperCAmelCase_ : Dict = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) UpperCAmelCase_ : Tuple = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=lowerCamelCase_ )
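# The inference-time blend in the forward pass above is a convex combination of the two
# transformers' residual outputs, controlled by `mix_ratio`. Isolated with dummy tensors:
import torch

mix_ratio = 0.5
encoded_text, encoded_image = torch.randn(2, 4, 8), torch.randn(2, 4, 8)
blended = encoded_text * mix_ratio + encoded_image * (1 - mix_ratio)
print(blended.shape)  # torch.Size([2, 4, 8])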
345
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: repeatedly pop vertices with indegree 0; if not every
    vertex can be popped, the graph contains a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency list of the graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
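# On a cyclic graph no vertex ever reaches indegree 0, so `cnt` stays below `len(graph)`
# and the function reports the cycle instead of printing an ordering:
topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"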
345
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
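# The `_LazyModule` indirection above can be approximated in plain Python with a PEP 562
# module-level `__getattr__`: attribute access triggers the real import only on first use.
# A minimal standalone sketch (not the transformers implementation):
import importlib

_lazy_map = {"sqrt": "math", "randint": "random"}  # attribute name -> providing module


def __getattr__(name):  # invoked only for names not already defined in this module
    if name in _lazy_map:
        return getattr(importlib.import_module(_lazy_map[name]), name)
    raise AttributeError(name)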
345
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''microsoft/swinv2-tiny-patch4-window8-256''': ( '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json''' ), } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = "swinv2" A__ : int = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: List[str] ,lowerCamelCase_: List[str]=224 ,lowerCamelCase_: List[str]=4 ,lowerCamelCase_: List[Any]=3 ,lowerCamelCase_: Optional[Any]=96 ,lowerCamelCase_: Any=[2, 2, 6, 2] ,lowerCamelCase_: Dict=[3, 6, 12, 24] ,lowerCamelCase_: str=7 ,lowerCamelCase_: Optional[Any]=4.0 ,lowerCamelCase_: Tuple=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: str=False ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: Union[str, Any]=1e-5 ,lowerCamelCase_: str=32 ,**lowerCamelCase_: List[str] ,) -> Tuple: super().__init__(**lowerCamelCase_ ) UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : Tuple = patch_size UpperCAmelCase_ : Dict = num_channels UpperCAmelCase_ : List[Any] = embed_dim UpperCAmelCase_ : Dict = depths UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) UpperCAmelCase_ : str = num_heads UpperCAmelCase_ : Tuple = window_size UpperCAmelCase_ : int = mlp_ratio UpperCAmelCase_ : str = qkv_bias UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : int = drop_path_rate UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : List[str] = use_absolute_embeddings UpperCAmelCase_ : Dict = layer_norm_eps UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCAmelCase_ : List[str] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) ) UpperCAmelCase_ : Any = (0, 0, 0, 0)
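# The derived `hidden_size` above doubles the embedding dim once per stage after the first.
# With this config's defaults (embed_dim=96, depths=[2, 2, 6, 2]):
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 768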
345
1
import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class _snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[Any]=7 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=18 ,lowerCamelCase_: Tuple=30 ,lowerCamelCase_: Any=400 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: List[Any]=None ,lowerCamelCase_: int=True ,) -> Union[str, Any]: UpperCAmelCase_ : Any = size if size is not None else {"""height""": 18, """width""": 18} UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : List[str] = batch_size UpperCAmelCase_ : Optional[Any] = num_channels UpperCAmelCase_ : Union[str, Any] = image_size UpperCAmelCase_ : Optional[Any] = min_resolution UpperCAmelCase_ : str = max_resolution UpperCAmelCase_ : List[str] = do_resize UpperCAmelCase_ : Any = size UpperCAmelCase_ : Dict = do_normalize def A__ ( self: str ) -> Any: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4], [-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class _snake_case ( __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Dict = ImageGPTImageProcessor if is_vision_available() else None def A__ ( self: Optional[int] ) -> str: UpperCAmelCase_ : Optional[int] = ImageGPTImageProcessingTester(self ) @property def A__ ( self: str ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self: Optional[Any] ) -> Dict: UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ ,"""clusters""" ) ) self.assertTrue(hasattr(lowerCamelCase_ ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ ,"""size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ ,"""do_normalize""" ) ) def A__ ( self: Dict ) -> Any: UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} ) UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} ) def A__ ( self: Optional[Any] ) -> List[Any]: UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase_ : Any = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ ,obj[key] ) ) else: self.assertEqual(obj[key] ,lowerCamelCase_ ) def A__ ( self: Dict ) -> Tuple: UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCamelCase_ 
,"""image_processor.json""" ) image_processor_first.to_json_file(lowerCamelCase_ ) UpperCAmelCase_ : Any = self.image_processing_class.from_json_file(lowerCamelCase_ ).to_dict() UpperCAmelCase_ : Union[str, Any] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,lowerCamelCase_ ) def A__ ( self: Optional[int] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : int = self.image_processing_class.from_pretrained(lowerCamelCase_ ).to_dict() UpperCAmelCase_ : int = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,lowerCamelCase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def A__ ( self: Dict ) -> Optional[int]: pass def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : Tuple = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) UpperCAmelCase_ : Union[str, Any] = Image.open(dataset[4]["""file"""] ) UpperCAmelCase_ : Dict = Image.open(dataset[5]["""file"""] ) UpperCAmelCase_ : Optional[int] = [imagea, imagea] return images @require_vision @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @slow def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : int = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) UpperCAmelCase_ : Optional[int] = prepare_images() # test non-batched UpperCAmelCase_ : Union[str, Any] = image_processing(images[0] ,return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(1, 1024) ) UpperCAmelCase_ : Optional[Any] = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() ,lowerCamelCase_ ) # test batched UpperCAmelCase_ : Any = image_processing(lowerCamelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(2, 1024) ) UpperCAmelCase_ : List[str] = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() ,lowerCamelCase_ )
345
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: int ) -> str: # A mock response for an HTTP head request to emulate server down UpperCAmelCase_ : List[str] = mock.Mock() UpperCAmelCase_ : List[Any] = 500 UpperCAmelCase_ : Union[str, Any] = {} UpperCAmelCase_ : Union[str, Any] = HTTPError UpperCAmelCase_ : Any = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head: UpperCAmelCase_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def A__ ( self: str ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase_ : str = mock.Mock() UpperCAmelCase_ : Optional[int] = 500 UpperCAmelCase_ : int = {} UpperCAmelCase_ : Union[str, Any] = HTTPError UpperCAmelCase_ : List[Any] = {} # Download this model to make sure it's in the cache. UpperCAmelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" ,return_value=lowerCamelCase_ ) as mock_head: UpperCAmelCase_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def A__ ( self: str ) -> Dict: # This test is for deprecated behavior and can be removed in v5 try: UpperCAmelCase_ : Any = tempfile.mktemp() with open(lowerCamelCase_ ,"""wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,lowerCamelCase_ ) UpperCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(lowerCamelCase_ ) finally: os.remove(lowerCamelCase_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. 
return try: with open("""tokenizer.json""" ,"""wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,lowerCamelCase_ ) UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("""tokenizer.json""" ) def A__ ( self: List[str] ) -> Tuple: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase_ : str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class _snake_case ( unittest.TestCase ): '''simple docstring''' A__ : str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def A__ ( cls: Dict ) -> Optional[int]: UpperCAmelCase_ : List[str] = TOKEN HfFolder.save_token(lowerCamelCase_ ) @classmethod def A__ ( cls: Optional[Any] ) -> List[str]: try: delete_repo(token=cls._token ,repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def A__ ( self: Any ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : List[Any] = BertTokenizer(lowerCamelCase_ ) tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowerCamelCase_ ,repo_id="""test-tokenizer""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def A__ ( self: Optional[int] ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : Dict = BertTokenizer(lowerCamelCase_ ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token ) UpperCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( lowerCamelCase_ ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token ) UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) 
@require_tokenizers def A__ ( self: Optional[int] ) -> Optional[Any]: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : Optional[Any] = CustomTokenizer(lowerCamelCase_ ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[str] = os.path.join(lowerCamelCase_ ,"""vocab.txt""" ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) UpperCAmelCase_ : str = BertTokenizerFast.from_pretrained(lowerCamelCase_ ) bert_tokenizer.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" ) UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' ,use_fast=lowerCamelCase_ ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" ) class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Any = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def A__ ( self: Tuple ) -> Optional[int]: UpperCAmelCase_ : str = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Dict = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] ) def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[str] = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] ) def A__ ( self: int ) -> 
Union[str, Any]: UpperCAmelCase_ : List[str] = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] ) def A__ ( self: int ) -> List[str]: UpperCAmelCase_ : int = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] ) def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] ) def A__ ( self: List[Any] ) -> Any: # Even if the offsets are wrong, we necessarily output correct string # parts. UpperCAmelCase_ : Tuple = Trie() UpperCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(lowerCamelCase_ ,["""AB""", """C"""] )
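# A minimal dict-based trie with the `add`/`split` behavior exercised above: `add` nests
# characters with "" marking a word end, and `split` cuts the text at the longest known
# tokens. A sketch of the core idea only -- the real transformers `Trie` also resolves
# overlapping and partial matches more carefully.
def trie_add(data: dict, word: str) -> None:
    node = data
    for ch in word:
        node = node.setdefault(ch, {})
    node[""] = 1


def trie_split(data: dict, text: str) -> list:
    out, start, i = [], 0, 0
    while i < len(text):
        node, j, end = data, i, None
        while j < len(text) and text[j] in node:
            node = node[text[j]]
            j += 1
            if "" in node:
                end = j  # longest match seen so far from position i
        if end is None:
            i += 1
        else:
            if i > start:
                out.append(text[start:i])
            out.append(text[i:end])
            start = i = end
    if start < len(text):
        out.append(text[start:])
    return out


trie = {}
trie_add(trie, "[CLS]")
trie_add(trie, "extra_id_100")
print(trie_split(trie, "[CLS] This is a extra_id_100"))  # ['[CLS]', ' This is a ', 'extra_id_100']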
345
1
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = (10**k - 1) // 9 is divisible
    by `divisor`, or 0 if no repunit can be (i.e. `divisor` shares a factor with 10)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 129: the least odd divisor n, coprime to 10, with A(n) > limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
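# Worked example: repunits run 1, 11, 111, ... and A(7) = 6 because
# 111111 = 7 * 15873 is the first repunit divisible by 7:
print(least_divisible_repunit(7))  # 6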
345
from ..utils import DummyObject, requires_backends class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: str ,*lowerCamelCase_: int ,**lowerCamelCase_: List[str] ) -> str: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: List[str] ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: int ) -> Optional[int]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Any = ["flax"] def __init__( self: int ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Any ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Dict = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[Any] ) -> Any: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> str: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Optional[Any] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : List[str] = ["flax"] def __init__( self: str ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[int] ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : int = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: List[str] ) -> Optional[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Union[str, Any] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[Any] ) -> str: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[int] = ["flax"] def __init__( self: str ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Optional[int] ) -> Tuple: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Optional[Any] ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : List[Any] = ["flax"] def __init__( self: Union[str, Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Dict: requires_backends(cls ,["""flax"""] ) 
@classmethod def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: str ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Tuple: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: Union[str, Any] ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: Dict ) -> Optional[int]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : str = ["flax"] def __init__( self: Optional[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: List[str] ) -> Optional[Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: int ) -> List[str]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: str ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: int ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Union[str, Any] = ["flax"] def __init__( self: Any ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: Optional[int] ) -> List[str]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Optional[int] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: str ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: List[Any] ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Tuple = ["flax"] def __init__( self: Any ,*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: Dict ) -> str: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Tuple ,*lowerCamelCase_: Union[str, Any] ,**lowerCamelCase_: List[str] ) -> int: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: List[Any] ,*lowerCamelCase_: str ,**lowerCamelCase_: str ) -> Any: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[Any] = ["flax"] def __init__( self: Dict ,*lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> Union[str, Any]: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: int ,**lowerCamelCase_: Tuple ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: Optional[Any] ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Optional[int] ) -> int: requires_backends(cls ,["""flax"""] ) class _snake_case ( metaclass=__snake_case ): '''simple docstring''' A__ : Optional[int] = ["flax"] def __init__( self: List[str] ,*lowerCamelCase_: Dict ,**lowerCamelCase_: Dict ) -> int: requires_backends(self ,["""flax"""] ) @classmethod def A__ ( cls: Dict ,*lowerCamelCase_: List[Any] ,**lowerCamelCase_: Dict ) -> Union[str, Any]: requires_backends(cls ,["""flax"""] ) @classmethod def A__ ( cls: int ,*lowerCamelCase_: Any ,**lowerCamelCase_: Any ) -> Optional[Any]: requires_backends(cls ,["""flax"""] )
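# These dummy classes exist so `import diffusers` succeeds without flax installed: each one
# raises a clear error only when actually used. The guard boils down to something like this
# sketch (the real `requires_backends` message and availability checks differ):
import importlib.util


def requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backends: {missing}")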
345
1
from math import pi, sqrt


def gamma(num: float) -> float:
    """Gamma function for positive integers and half-integers, via the recurrence
    gamma(n) = (n - 1) * gamma(n - 1) with gamma(1) = 1 and gamma(0.5) = sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num == 0:
            break  # entering 0 now exits instead of raising "math domain error"
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
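# Quick check of the half-integer branch: the recurrence unrolls to
# gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ~= 3.3234, which matches the standard library:
from math import gamma as stdlib_gamma, isclose

assert isclose(gamma(3.5), stdlib_gamma(3.5))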
345
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place with the unbiased Fisher-Yates (Durstenfeld) algorithm:
    walk from the last index down, swapping each element with a uniformly random
    position at or before it. (The naive "swap two random indices n times" variant
    does not produce uniformly distributed permutations.)"""
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
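# Sanity check of uniformity: over many shuffles of [0, 1, 2], each of the six
# permutations should appear roughly 1/6 of the time (a quick statistical sketch):
from collections import Counter

counts = Counter(tuple(fisher_yates_shuffle([0, 1, 2])) for _ in range(60_000))
print({p: round(c / 60_000, 3) for p, c in sorted(counts.items())})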
345
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase_ = { '''vocab_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt''' ), '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''', '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-openqa''': ( '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-reader''': ( '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-openqa''': ( '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-reader''': ( '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json''' ), }, } UpperCamelCase_ = { '''google/realm-cc-news-pretrained-embedder''': 512, '''google/realm-cc-news-pretrained-encoder''': 512, '''google/realm-cc-news-pretrained-scorer''': 512, '''google/realm-cc-news-pretrained-openqa''': 512, '''google/realm-orqa-nq-openqa''': 512, '''google/realm-orqa-nq-reader''': 512, '''google/realm-orqa-wq-openqa''': 512, '''google/realm-orqa-wq-reader''': 512, } UpperCamelCase_ = { '''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True}, 
    '''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
    '''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
    '''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
    '''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}


class RealmTokenizerFast(PreTrainedTokenizerFast):

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always encode to a fixed sequence length so candidates stack into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
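# A minimal usage sketch for batch_encode_candidates above. The checkpoint name and the
# printed shape are assumptions for illustration, not taken from this row.
from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# one list of candidate strings per example; every candidate is padded to max_length
text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
print(batch["input_ids"].shape)  # (num_examples, num_candidates, max_length)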
345
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = True A__ : bool = False A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> List[str]: UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[int] = [] for i in range(self.num_layers ): UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD( in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : int = resnets UpperCAmelCase_ : Tuple = attentions if self.add_downsample: UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int: UpperCAmelCase_ : List[Any] = () for resnet, attn in zip(self.resnets ,self.attentions ): UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : bool = True A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> int: UpperCAmelCase_ : List[str] = [] for i in range(self.num_layers ): UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels UpperCAmelCase_ : Dict = FlaxResnetBlockaD( in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = resnets if self.add_downsample: UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any: UpperCAmelCase_ : Union[str, Any] = () for resnet in self.resnets: UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = True A__ : bool = False A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: str ) -> Any: UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : List[str] = [] for i in 
range(self.num_layers ): UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : int = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = resnets UpperCAmelCase_ : Dict = attentions if self.add_upsample: UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]: for resnet, attn in zip(self.resnets ,self.attentions ): # pop res hidden states UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1] UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1] UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) if self.add_upsample: UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ ) return hidden_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : int A__ : int A__ : float = 0.0 A__ : int = 1 A__ : bool = True A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> Dict: UpperCAmelCase_ : Any = [] for i in range(self.num_layers ): UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels UpperCAmelCase_ : Any = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : str = resnets if self.add_upsample: UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]: for resnet in self.resnets: # pop res hidden states UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1] UpperCAmelCase_ : str = res_hidden_states_tuple[:-1] UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) if self.add_upsample: UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ ) return hidden_states class _snake_case ( nn.Module ): '''simple docstring''' A__ : int A__ : float = 0.0 A__ : int = 1 A__ : int = 1 A__ : bool = False A__ : bool = False A__ : jnp.dtype = jnp.floataa def A__ ( self: Dict ) -> List[str]: # there is always at least one resnet UpperCAmelCase_ : List[Any] = [ FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels 
,dropout_prob=self.dropout ,dtype=self.dtype ,) ] UpperCAmelCase_ : Any = [] for _ in range(self.num_layers ): UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel( in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase_ ) UpperCAmelCase_ : Any = FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase_ ) UpperCAmelCase_ : Dict = resnets UpperCAmelCase_ : Any = attentions def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ ) for attn, resnet in zip(self.attentions ,self.resnets[1:] ): UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ ) return hidden_states
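# Sketch only: the block classes above carry mangled names; this assumes the first one is
# diffusers' FlaxCrossAttnDownBlock2D (import path and field defaults are assumptions, with
# field names taken from the attributes read in the setup method above).
import jax
import jax.numpy as jnp
from diffusers.models.unet_2d_blocks_flax import FlaxCrossAttnDownBlock2D  # assumed path

block = FlaxCrossAttnDownBlock2D(in_channels=32, out_channels=64)
rng = jax.random.PRNGKey(0)
sample = jnp.ones((1, 8, 8, 32))          # Flax diffusers uses NHWC layout
temb = jnp.ones((1, 128))                 # timestep embedding
encoder_states = jnp.ones((1, 77, 768))   # cross-attention context, e.g. text encoder states
params = block.init(rng, sample, temb, encoder_states)
hidden, skips = block.apply(params, sample, temb, encoder_states)
print(hidden.shape, len(skips))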
345
1
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : '''simple docstring''' def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str: UpperCAmelCase_ : List[Any] = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : str = patch_size UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : Dict = embed_dim UpperCAmelCase_ : Dict = hidden_sizes UpperCAmelCase_ : str = depths UpperCAmelCase_ : int = num_heads UpperCAmelCase_ : List[Any] = window_size UpperCAmelCase_ : Union[str, Any] = mlp_ratio UpperCAmelCase_ : int = qkv_bias UpperCAmelCase_ : List[str] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Optional[int] = drop_path_rate UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : List[Any] = use_absolute_embeddings UpperCAmelCase_ : List[Any] = patch_norm UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Optional[Any] = scope UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Optional[int] = encoder_stride UpperCAmelCase_ : Optional[int] = out_features UpperCAmelCase_ : Optional[int] = out_indices def A__ ( self: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : int = None if self.use_labels: UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Any = self.get_config() return config, pixel_values, labels def A__ ( self: List[Any] ) -> Tuple: return FocalNetConfig( image_size=self.image_size ,patch_size=self.patch_size 
,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]: UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ) UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]: UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] ) # verify backbone works with out_features=None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : int = 1 UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int: UpperCAmelCase_ : List[Any] = self.type_sequence_label_size UpperCAmelCase_ : int = 
FocalNetForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : List[str] = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def A__ ( self: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs UpperCAmelCase_ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[Any] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) A__ : Union[str, Any] = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) A__ : Optional[Any] = False A__ : Any = False A__ : List[str] = False A__ : Any = False A__ : Any = False def A__ ( self: List[str] ) -> Tuple: UpperCAmelCase_ : Dict = FocalNetModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ ) def A__ ( self: List[str] ) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self: List[str] ) -> Union[str, Any]: return def A__ ( self: str ) -> List[str]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def A__ ( self: Tuple ) -> int: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def A__ ( self: Dict ) -> List[str]: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def A__ ( self: int ) -> int: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def A__ ( self: int ) -> Dict: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def A__ ( self: Optional[Any] ) -> Optional[Any]: pass def A__ ( self: Optional[Any] ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase_ : List[Any] = model.get_output_embeddings() 
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) ) def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : str = model_class(lowerCamelCase_ ) UpperCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Any = [*signature.parameters.keys()] UpperCAmelCase_ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]: UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Any = outputs.hidden_states UpperCAmelCase_ : List[Any] = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # FocalNet has a different seq_length UpperCAmelCase_ : int = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape UpperCAmelCase_ : List[Any] = ( reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def A__ ( self: Any ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : str = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Union[str, Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[str] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % 
patch_size[1]) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) @slow def A__ ( self: Optional[int] ) -> Optional[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def A__ ( self: Optional[int] ) -> str: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def A__ ( self: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = self.default_image_processor UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Dict = model(**lowerCamelCase_ ) # verify the logits UpperCAmelCase_ : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() ,281 ) @require_torch class _snake_case ( __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else () A__ : int = FocalNetConfig A__ : List[str] = False def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : str = FocalNetModelTester(self )
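# Standalone check of the shape arithmetic exercised by the FocalNet tests above,
# using the tester defaults (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]).
image_size, patch_size = 32, 2
embed_dim, depths = 16, [1, 2, 1]

expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))  # 16
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))                            # 64

padded_height = image_size + patch_size - (image_size % patch_size)  # 34
padded_width = image_size + patch_size - (image_size % patch_size)   # 34
print(expected_seq_len, expected_dim, padded_height, padded_width)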
345
import pickle import numpy as np from matplotlib import pyplot as plt class _snake_case : '''simple docstring''' def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]: UpperCAmelCase_ : List[Any] = bp_numa UpperCAmelCase_ : str = bp_numa UpperCAmelCase_ : List[Any] = bp_numa UpperCAmelCase_ : Optional[int] = conva_get[:2] UpperCAmelCase_ : List[Any] = conva_get[2] UpperCAmelCase_ : str = size_pa UpperCAmelCase_ : Optional[int] = rate_w UpperCAmelCase_ : Dict = rate_t UpperCAmelCase_ : List[Any] = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1 UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1 UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple: # save model dict with pickle UpperCAmelCase_ : Dict = { """num_bp1""": self.num_bpa, """num_bp2""": self.num_bpa, """num_bp3""": self.num_bpa, """conv1""": self.conva, """step_conv1""": self.step_conva, """size_pooling1""": self.size_poolinga, """rate_weight""": self.rate_weight, """rate_thre""": self.rate_thre, """w_conv1""": self.w_conva, """wkj""": self.wkj, """vji""": self.vji, """thre_conv1""": self.thre_conva, """thre_bp2""": self.thre_bpa, """thre_bp3""": self.thre_bpa, } with open(lowerCamelCase_ ,"""wb""" ) as f: pickle.dump(lowerCamelCase_ ,lowerCamelCase_ ) print(F'''Model saved: {save_path}''' ) @classmethod def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]: # read saved model with open(lowerCamelCase_ ,"""rb""" ) as f: UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301 UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" ) conv_get.append(model_dic.get("""step_conv1""" ) ) UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" ) UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" ) UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" ) UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" ) UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" ) UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" ) # create model instance UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # modify model parameter UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" ) UpperCAmelCase_ : int = model_dic.get("""wkj""" ) UpperCAmelCase_ : int = model_dic.get("""vji""" ) UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" ) UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" ) UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" ) return conv_ins def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple: return 1 / (1 + np.exp(-1 * x )) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]: return round(lowerCamelCase_ ,3 ) def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any: # convolution process UpperCAmelCase_ : Optional[Any] = convs[0] UpperCAmelCase_ : int = 
convs[1] UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0] # get the data slice of original image data, data_focus UpperCAmelCase_ : Dict = [] for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ): for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ): UpperCAmelCase_ : Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(lowerCamelCase_ ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(lowerCamelCase_ ): UpperCAmelCase_ : Optional[int] = [] for i_focus in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : int = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(lowerCamelCase_ ) ) UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape( lowerCamelCase_ ,lowerCamelCase_ ) data_featuremap.append(lowerCamelCase_ ) # expanding the data slice to One dimenssion UpperCAmelCase_ : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) ) UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ ) return focus_list, data_featuremap def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]: # pooling process UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] ) UpperCAmelCase_ : Any = int(size_map / size_pooling ) UpperCAmelCase_ : Optional[int] = [] for i_map in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : Any = featuremaps[i_map] UpperCAmelCase_ : Tuple = [] for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ): for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(lowerCamelCase_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(lowerCamelCase_ ) ) UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ ) featuremap_pooled.append(lowerCamelCase_ ) return featuremap_pooled def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]: # expanding three dimension data to one dimension list UpperCAmelCase_ : List[Any] = [] for i in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : Tuple = np.shape(data[i] ) UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] ) UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0] data_expanded.extend(lowerCamelCase_ ) UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ ) return data_expanded def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]: # expanding matrix to one dimension list UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ ) UpperCAmelCase_ : str = np.shape(lowerCamelCase_ ) UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]: UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Tuple = 0 for i_map in range(lowerCamelCase_ ): UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) ) for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ): for j in range(0 
,lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : Any = pd_pool[ i_pool ] UpperCAmelCase_ : List[str] = i_pool + 1 UpperCAmelCase_ : Optional[Any] = np.multiply( lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(lowerCamelCase_ ) return pd_all def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]: # model traning print("""----------------------Start Training-------------------------""" ) print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) ) print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) ) UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : Any = 10000 while rp < n_repeat and mse >= error_accuracy: UpperCAmelCase_ : List[str] = 0 print(F'''-------------Learning Time {rp}--------------''' ) for p in range(len(lowerCamelCase_ ) ): # print('------------Learning Image: %d--------------'%p) UpperCAmelCase_ : str = np.asmatrix(datas_train[p] ) UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute( lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga ) UpperCAmelCase_ : int = np.shape(lowerCamelCase_ ) UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = data_bp_input UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa UpperCAmelCase_ : int = self.sig(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCAmelCase_ : List[str] = np.multiply( (data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) ) UpperCAmelCase_ : List[Any] = np.multiply( np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) ) UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji ) UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist() UpperCAmelCase_ : str = self._calculate_gradient_from_pool( lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] ) UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCAmelCase_ : str = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCAmelCase_ : int = rp + 1 UpperCAmelCase_ : Any = 
error_count / patterns all_mse.append(lowerCamelCase_ ) def draw_error(): UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(lowerCamelCase_ ,"""+-""" ) plt.plot(lowerCamelCase_ ,"""r--""" ) plt.xlabel("""Learning Times""" ) plt.ylabel("""All_mse""" ) plt.grid(lowerCamelCase_ ,alpha=0.5 ) plt.show() print("""------------------Training Complished---------------------""" ) print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple: # model predict UpperCAmelCase_ : Union[str, Any] = [] print("""-------------------Start Testing-------------------------""" ) print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) ) for p in range(len(lowerCamelCase_ ) ): UpperCAmelCase_ : int = np.asmatrix(datas_test[p] ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute( lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga ) UpperCAmelCase_ : str = self._expand(lowerCamelCase_ ) UpperCAmelCase_ : str = data_bp_input UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ ) produce_out.extend(bp_outa.getA().tolist() ) UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out] return np.asarray(lowerCamelCase_ ) def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple: # return the data of image after convoluting process so we can check it out UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute( lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
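# The convolution/pooling sizes in the CNN class above follow the usual sliding-window
# arithmetic; a small standalone check (the concrete numbers are illustrative only).
size_data, size_conv, conv_step = 21, 3, 2                        # input side, kernel side, stride
size_feature_map = int((size_data - size_conv) / conv_step + 1)   # 10, as computed in convolute
size_pooling = 2
size_pooled = int(size_feature_map / size_pooling)                # 5, as computed in pooling
print(size_feature_map, size_pooled)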
345
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
    '''tokenization_roformer''': ['''RoFormerTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_roformer_fast'''] = ['''RoFormerTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_roformer'''] = [
        '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''RoFormerForCausalLM''',
        '''RoFormerForMaskedLM''',
        '''RoFormerForMultipleChoice''',
        '''RoFormerForQuestionAnswering''',
        '''RoFormerForSequenceClassification''',
        '''RoFormerForTokenClassification''',
        '''RoFormerLayer''',
        '''RoFormerModel''',
        '''RoFormerPreTrainedModel''',
        '''load_tf_weights_in_roformer''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_roformer'''] = [
        '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFRoFormerForCausalLM''',
        '''TFRoFormerForMaskedLM''',
        '''TFRoFormerForMultipleChoice''',
        '''TFRoFormerForQuestionAnswering''',
        '''TFRoFormerForSequenceClassification''',
        '''TFRoFormerForTokenClassification''',
        '''TFRoFormerLayer''',
        '''TFRoFormerModel''',
        '''TFRoFormerPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_roformer'''] = [
        '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FlaxRoFormerForMaskedLM''',
        '''FlaxRoFormerForMultipleChoice''',
        '''FlaxRoFormerForQuestionAnswering''',
        '''FlaxRoFormerForSequenceClassification''',
        '''FlaxRoFormerForTokenClassification''',
        '''FlaxRoFormerModel''',
        '''FlaxRoFormerPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
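# With the lazy import structure above, the heavy submodules are only materialized on first
# attribute access; a minimal usage sketch, assuming torch is installed.
from transformers import RoFormerConfig, RoFormerModel

config = RoFormerConfig(num_hidden_layers=2)  # deliberately small for illustration
model = RoFormerModel(config)
print(model.config.model_type)  # "roformer"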
345
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
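# A self-contained run of the tokenizer built in setUp above, outside the test harness;
# the temporary file names are arbitrary since the paths are passed explicitly.
import json, os, tempfile
from transformers.models.ctrl.tokenization_ctrl import CTRLTokenizer

tmp = tempfile.mkdtemp()
vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
vocab_file = os.path.join(tmp, "vocab.json")
merges_file = os.path.join(tmp, "merges.txt")
with open(vocab_file, "w", encoding="utf-8") as f:
    json.dump(dict(zip(vocab, range(len(vocab)))), f)
with open(merges_file, "w", encoding="utf-8") as f:
    f.write("\n".join(merges))

tok = CTRLTokenizer(vocab_file, merges_file, unk_token="<unk>")
print(tok.tokenize("adapt react readapt apt"))
# ['adapt', 're@@', 'a@@', 'c@@', 't', 're@@', 'adapt', 'apt']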
345
1
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1

        act2 = get_activation("gelu")

        # the attribute was set on a distinct instance, so it must not leak onto act2
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
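# Quick demonstration of the clipping behavior checked in test_gelu_10 above.
import torch
from transformers.activations import get_activation

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
gelu = get_activation("gelu")
gelu_10 = get_activation("gelu_10")
print(gelu(x))     # unbounded above: the last entry is ~100
print(gelu_10(x))  # identical below the clip, capped at 10.0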
345
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
    '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}


class ErnieMConfig(PretrainedConfig):

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
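# The attribute_map above forwards legacy attribute names to the canonical ones;
# a minimal sketch of that indirection.
from transformers import ErnieMConfig

config = ErnieMConfig(hidden_size=512, num_hidden_layers=6, classifier_dropout=0.3)
print(config.dropout)      # 0.3 -- "dropout" is routed to classifier_dropout
print(config.num_classes)  # routed to num_labels (PretrainedConfig default is 2)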
345
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [F'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
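# Sketch of the derived attributes computed in __init__ above.
from transformers import NatConfig

config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], out_features=["stage1", "stage2"])
print(config.hidden_size)  # int(64 * 2 ** 3) == 512
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_indices)  # [1, 2], aligned with the requested stage names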
345
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def lowerCamelCase_ ( _a : str , _a : Any=100 , _a : int=" " ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = text.split(_a ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_a ) , _a )] def lowerCamelCase_ ( _a : dict ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(_a ): titles.append(title if title is not None else """""" ) texts.append(_a ) return {"title": titles, "text": texts} def lowerCamelCase_ ( _a : dict , _a : DPRContextEncoder , _a : DPRContextEncoderTokenizerFast ): '''simple docstring''' UpperCAmelCase_ : List[str] = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=_a , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""] UpperCAmelCase_ : Tuple = ctx_encoder(input_ids.to(device=_a ) , return_dict=_a ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowerCamelCase_ ( _a : "RagExampleArguments" , _a : "ProcessingArguments" , _a : "IndexHnswArguments" , ): '''simple docstring''' logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way UpperCAmelCase_ : Optional[int] = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words UpperCAmelCase_ : Tuple = dataset.map(_a , batched=_a , num_proc=processing_args.num_proc ) # And compute the embeddings UpperCAmelCase_ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_a ) UpperCAmelCase_ : Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) UpperCAmelCase_ : Any = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space UpperCAmelCase_ : List[str] = dataset.map( partial(_a , ctx_encoder=_a , ctx_tokenizer=_a ) , batched=_a , batch_size=processing_args.batch_size , features=_a , ) # And finally save your dataset UpperCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" ) dataset.save_to_disk(_a ) # from datasets import load_from_disk # 
dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search UpperCAmelCase_ : Union[str, Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=_a ) # And save the index UpperCAmelCase_ : Optional[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(_a ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class _snake_case : '''simple docstring''' A__ : str = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) A__ : Optional[str] = field( default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) A__ : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) A__ : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) A__ : Optional[str] = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class _snake_case : '''simple docstring''' A__ : Optional[int] = field( default=__snake_case , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) A__ : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class _snake_case : '''simple docstring''' A__ : int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) A__ : int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
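# Query-time sketch expanding the commented reload lines above; `passages_path` and
# `index_path` are the paths written by main(), and the question embedding below is a
# random stand-in for a real DPR question-encoder output.
import numpy as np
from datasets import load_from_disk

dataset = load_from_disk(passages_path)
dataset.load_faiss_index("embeddings", index_path)

question_embedding = np.random.rand(768).astype("float32")  # stand-in for a DPR question vector
scores, retrieved = dataset.get_nearest_examples("embeddings", question_embedding, k=5)
print(retrieved["title"])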
345
1
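The commented-out hints at the two save points above are the whole reload story. A minimal sketch of getting the knowledge dataset and its HNSW index back and querying it, assuming the default output locations the script writes to (the random query vector is only a stand-in for a real DPR question embedding):

import numpy as np
from datasets import load_from_disk

# Reload the passages saved with dataset.save_to_disk(passages_path)
dataset = load_from_disk("test_run/dummy-kb/my_knowledge_dataset")

# Reattach the serialized Faiss index to the "embeddings" column
dataset.load_faiss_index("embeddings", "test_run/dummy-kb/my_knowledge_dataset_hnsw_index.faiss")

# Nearest-neighbor lookup against a d=768 query embedding
query = np.random.randn(768).astype("float32")
scores, passages = dataset.get_nearest_examples("embeddings", query, k=5)
print(scores, passages["title"])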
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
345
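The file above is the standard move-and-deprecate shim: re-export the class from its new home so old import paths keep working, and warn once at import time. A minimal stdlib-only sketch of the same pattern (module and class names are hypothetical, and the `deprecate` helper used above is diffusers-internal):

# old_location.py -- keeps the legacy import path alive
import warnings

from new_location import MovedClass  # noqa: F401  (hypothetical new home)

warnings.warn(
    "Importing MovedClass from old_location is deprecated and will be removed "
    "in a future version. Import it from new_location instead.",
    FutureWarning,
    stacklevel=2,
)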
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Dict = AutoencoderKL A__ : Optional[int] = "sample" A__ : Tuple = 1E-2 @property def A__ ( self: List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = 4 UpperCAmelCase_ : str = 3 UpperCAmelCase_ : Any = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ ) return {"sample": image} @property def A__ ( self: List[str] ) -> Tuple: return (3, 32, 32) @property def A__ ( self: Optional[Any] ) -> Any: return (3, 32, 32) def A__ ( self: Any ) -> Tuple: UpperCAmelCase_ : List[Any] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } UpperCAmelCase_ : int = self.dummy_input return init_dict, inputs_dict def A__ ( self: Optional[Any] ) -> int: pass def A__ ( self: str ) -> Any: pass @unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" ) def A__ ( self: Union[str, Any] ) -> Dict: # enable deterministic behavior for gradient checkpointing UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ ) model.to(lowerCamelCase_ ) assert not model.is_gradient_checkpointing and model.training UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowerCamelCase_ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() UpperCAmelCase_ : Dict = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) UpperCAmelCase_ : Dict = dict(model.named_parameters() ) UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) ) def A__ ( self: Optional[Any] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 ) model.to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def A__ ( self: Optional[int] ) -> int: UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ) UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ ) model.eval() if torch_device == "mps": UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) else: UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : str = torch.randn( 1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,) UpperCAmelCase_ : int = image.to(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": UpperCAmelCase_ : Tuple = torch.tensor( [ -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03, ] ) elif torch_device == "cpu": UpperCAmelCase_ : List[str] = torch.tensor( [-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] ) else: UpperCAmelCase_ : List[str] = torch.tensor( [-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] ) self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) ) @slow class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]: return F'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_ ) for s in shape] )}.npy''' def A__ ( self: Union[str, Any] ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]: UpperCAmelCase_ : Tuple = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ ) return image def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any: UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None UpperCAmelCase_ : str = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ : int = AutoencoderKL.from_pretrained( lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=lowerCamelCase_ ,revision=lowerCamelCase_ ,) model.to(lowerCamelCase_ ).eval() return model def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]: if torch_device == "mps": return torch.manual_seed(lowerCamelCase_ ) return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple: UpperCAmelCase_ : List[Any] = self.get_sd_vae_model() UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]], [47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]], # fmt: on ] ) 
@require_torch_gpu def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple: UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict: UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model() UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample assert sample.shape == image.shape UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]], [37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]], # fmt: on ] ) @require_torch_gpu def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.get_sd_vae_model() UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu() UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]], [16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]], # fmt: on ] ) @require_torch_gpu def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ ) assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 ) @parameterized.expand([(13,), 
(16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" ) def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int: UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" ) def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.get_sd_vae_model() UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]], [47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]], # fmt: on ] ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : Dict = self.get_sd_vae_model() UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ ) UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ ) with torch.no_grad(): UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu() UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2 assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
345
1
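Behind the slice assertions, the behavior under test is an encode/decode round trip through the VAE. A minimal sketch with the same tiny fusing/autoencoder-kl-dummy checkpoint the tests load:

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
vae.eval()

image = torch.randn(1, 3, 32, 32)  # dummy batch matching the tests' input size
with torch.no_grad():
    posterior = vae.encode(image).latent_dist            # diagonal Gaussian over latents
    latents = posterior.sample(generator=torch.manual_seed(0))
    recon = vae.decode(latents).sample                   # back to image space
print(latents.shape, recon.shape)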
from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
345
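The two availability checks gate a heavy optional import so the parent package still imports cleanly when torch or transformers is absent. A minimal sketch of the same guard using only the standard library (`is_available` here is a hypothetical stand-in for the library's helpers):

import importlib.util

def is_available(package: str) -> bool:
    # True if the package could be imported, without actually importing it yet
    return importlib.util.find_spec(package) is not None

if is_available("torch"):
    import torch  # optional heavy dependency, loaded only when present
else:
    torch = None  # callers are expected to check before using it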
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=__snake_case ) class _snake_case ( __snake_case ): '''simple docstring''' A__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} ) A__ : ClassVar[Features] = Features({"audio": Audio()} ) A__ : ClassVar[Features] = Features({"transcription": Value("string" )} ) A__ : str = "audio" A__ : str = "transcription" def A__ ( self: int ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]: if self.audio_column not in features: raise ValueError(F'''Column {self.audio_column} is not present in features.''' ) if not isinstance(features[self.audio_column] ,lowerCamelCase_ ): raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' ) UpperCAmelCase_ : Any = copy.deepcopy(self ) UpperCAmelCase_ : Union[str, Any] = self.input_schema.copy() UpperCAmelCase_ : Any = features[self.audio_column] UpperCAmelCase_ : Union[str, Any] = input_schema return task_template @property def A__ ( self: List[str] ) -> Dict[str, str]: return {self.audio_column: "audio", self.transcription_column: "transcription"}
345
1
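The validation method above checks that the configured audio column exists and is an Audio feature, then rebinds the template's input schema to the dataset's actual feature. A sketch of driving it, assuming the class is the `AutomaticSpeechRecognition` template that `datasets` exposes (this file's identifiers are mangled) and that the unnamed method is its `align_with_features`:

from datasets import Audio, Features, Value
from datasets.tasks import AutomaticSpeechRecognition

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition()

aligned = template.align_with_features(features)  # raises ValueError on a missing or mistyped column
print(aligned.column_mapping)                     # {'audio': 'audio', 'transcription': 'transcription'}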
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _snake_case ( __snake_case ): '''simple docstring''' A__ : str = ["vqvae"] def __init__( self: int ,lowerCamelCase_: AutoencoderKL ,lowerCamelCase_: UNetaDConditionModel ,lowerCamelCase_: Mel ,lowerCamelCase_: Union[DDIMScheduler, DDPMScheduler] ,) -> Optional[int]: super().__init__() self.register_modules(unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,mel=lowerCamelCase_ ,vqvae=lowerCamelCase_ ) def A__ ( self: Optional[int] ) -> int: return 50 if isinstance(self.scheduler ,lowerCamelCase_ ) else 1000 @torch.no_grad() def __call__( self: Optional[Any] ,lowerCamelCase_: int = 1 ,lowerCamelCase_: str = None ,lowerCamelCase_: np.ndarray = None ,lowerCamelCase_: int = 0 ,lowerCamelCase_: int = 0 ,lowerCamelCase_: int = None ,lowerCamelCase_: torch.Generator = None ,lowerCamelCase_: float = 0 ,lowerCamelCase_: float = 0 ,lowerCamelCase_: torch.Generator = None ,lowerCamelCase_: float = 0 ,lowerCamelCase_: torch.Tensor = None ,lowerCamelCase_: torch.Tensor = None ,lowerCamelCase_: Tuple=True ,) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: UpperCAmelCase_ : Union[str, Any] = steps or self.get_default_steps() self.scheduler.set_timesteps(lowerCamelCase_ ) UpperCAmelCase_ : Dict = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase_ : List[Any] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase_ : str = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) ,generator=lowerCamelCase_ ,device=self.device ,) UpperCAmelCase_ : int = noise UpperCAmelCase_ : Tuple = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = self.mel.audio_slice_to_image(lowerCamelCase_ ) UpperCAmelCase_ : int = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase_ : Any = (input_image / 255) * 2 - 1 UpperCAmelCase_ : List[str] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase_ : Optional[Any] = self.vqvae.encode(torch.unsqueeze(lowerCamelCase_ ,0 ) ).latent_dist.sample( generator=lowerCamelCase_ )[0] UpperCAmelCase_ : Any = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase_ : Union[str, Any] = self.scheduler.add_noise(lowerCamelCase_ ,lowerCamelCase_ ,self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase_ : Any = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase_ : Optional[int] = int(mask_start_secs * pixels_per_second ) UpperCAmelCase_ : Union[str, Any] = int(mask_end_secs * pixels_per_second ) UpperCAmelCase_ : Optional[Any] = self.scheduler.add_noise(lowerCamelCase_ ,lowerCamelCase_ ,torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if 
isinstance(self.unet ,lowerCamelCase_ ): UpperCAmelCase_ : Dict = self.unet(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )["""sample"""] else: UpperCAmelCase_ : List[str] = self.unet(lowerCamelCase_ ,lowerCamelCase_ )["""sample"""] if isinstance(self.scheduler ,lowerCamelCase_ ): UpperCAmelCase_ : int = self.scheduler.step( model_output=lowerCamelCase_ ,timestep=lowerCamelCase_ ,sample=lowerCamelCase_ ,eta=lowerCamelCase_ ,generator=lowerCamelCase_ ,)["""prev_sample"""] else: UpperCAmelCase_ : Optional[Any] = self.scheduler.step( model_output=lowerCamelCase_ ,timestep=lowerCamelCase_ ,sample=lowerCamelCase_ ,generator=lowerCamelCase_ ,)["""prev_sample"""] if mask is not None: if mask_start > 0: UpperCAmelCase_ : Union[str, Any] = mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase_ : List[Any] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase_ : Dict = 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase_ : Optional[int] = self.vqvae.decode(lowerCamelCase_ )["""sample"""] UpperCAmelCase_ : Optional[int] = (images / 2 + 0.5).clamp(0 ,1 ) UpperCAmelCase_ : List[Any] = images.cpu().permute(0 ,2 ,3 ,1 ).numpy() UpperCAmelCase_ : Tuple = (images * 255).round().astype("""uint8""" ) UpperCAmelCase_ : Any = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowerCamelCase_ ,mode="""RGB""" ).convert("""L""" ) for _ in images) ) UpperCAmelCase_ : str = [self.mel.image_to_audio(lowerCamelCase_ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowerCamelCase_ )[:, np.newaxis, :] ) ,**ImagePipelineOutput(lowerCamelCase_ ) ) @torch.no_grad() def A__ ( self: Dict ,lowerCamelCase_: List[Image.Image] ,lowerCamelCase_: int = 50 ) -> np.ndarray: assert isinstance(self.scheduler ,lowerCamelCase_ ) self.scheduler.set_timesteps(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = np.array( [np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase_ : Union[str, Any] = (sample / 255) * 2 - 1 UpperCAmelCase_ : List[Any] = torch.Tensor(lowerCamelCase_ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ): UpperCAmelCase_ : str = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase_ : Tuple = self.scheduler.alphas_cumprod[t] UpperCAmelCase_ : Optional[Any] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase_ : Optional[Any] = 1 - alpha_prod_t UpperCAmelCase_ : Any = self.unet(lowerCamelCase_ ,lowerCamelCase_ )["""sample"""] UpperCAmelCase_ : int = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase_ : int = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase_ : Tuple = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def A__ ( lowerCamelCase_: torch.Tensor ,lowerCamelCase_: torch.Tensor ,lowerCamelCase_: float ) -> torch.Tensor: UpperCAmelCase_ : List[Any] = acos(torch.dot(torch.flatten(lowerCamelCase_ ) ,torch.flatten(lowerCamelCase_ ) ) / torch.norm(lowerCamelCase_ ) / torch.norm(lowerCamelCase_ ) ) return sin((1 - alpha) * theta ) * xa / sin(lowerCamelCase_ ) + sin(alpha * theta ) * xa / sin(lowerCamelCase_ )
345
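The static method closing the pipeline is spherical linear interpolation (slerp): it interpolates between two latents along the great circle between them rather than the chord, which keeps intermediate points at a norm appropriate for Gaussian noise. A standalone sketch of the same formula (it assumes the inputs are not parallel, so theta != 0):

from math import acos, sin

import torch

def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # alpha=0 returns x0, alpha=1 returns x1, values in between walk the arc
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1)))
    return (sin((1 - alpha) * theta) * x0 + sin(alpha * theta) * x1) / sin(theta)

a, b = torch.randn(4), torch.randn(4)
mid = slerp(a, b, 0.5)  # halfway along the arc between a and b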
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = "layoutlmv3" def __init__( self: str ,lowerCamelCase_: Any=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: List[Any]=3072 ,lowerCamelCase_: str="gelu" ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Tuple=512 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: Dict=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: int=1 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Dict=1024 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Tuple=128 ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=32 ,lowerCamelCase_: Union[str, Any]=128 ,lowerCamelCase_: Tuple=64 ,lowerCamelCase_: Tuple=256 ,lowerCamelCase_: List[str]=True ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Any=True ,lowerCamelCase_: Dict=224 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Dict=None ,**lowerCamelCase_: str ,) -> List[Any]: super().__init__( vocab_size=lowerCamelCase_ ,hidden_size=lowerCamelCase_ ,num_hidden_layers=lowerCamelCase_ ,num_attention_heads=lowerCamelCase_ ,intermediate_size=lowerCamelCase_ ,hidden_act=lowerCamelCase_ ,hidden_dropout_prob=lowerCamelCase_ ,attention_probs_dropout_prob=lowerCamelCase_ ,max_position_embeddings=lowerCamelCase_ ,type_vocab_size=lowerCamelCase_ ,initializer_range=lowerCamelCase_ ,layer_norm_eps=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,) UpperCAmelCase_ : List[Any] = max_ad_position_embeddings UpperCAmelCase_ : Optional[int] = coordinate_size UpperCAmelCase_ : Optional[int] = shape_size UpperCAmelCase_ : Optional[Any] = has_relative_attention_bias UpperCAmelCase_ : Optional[int] = rel_pos_bins UpperCAmelCase_ : Union[str, Any] = max_rel_pos UpperCAmelCase_ : Dict = has_spatial_attention_bias UpperCAmelCase_ : Optional[int] = rel_ad_pos_bins UpperCAmelCase_ : Tuple = max_rel_ad_pos UpperCAmelCase_ : Union[str, Any] = text_embed UpperCAmelCase_ : Optional[Any] = visual_embed UpperCAmelCase_ : List[str] = input_size UpperCAmelCase_ : str = num_channels UpperCAmelCase_ : Optional[int] = patch_size UpperCAmelCase_ : Tuple = classifier_dropout class _snake_case ( __snake_case ): '''simple docstring''' A__ : Optional[Any] = version.parse("1.12" ) @property def A__ ( self: Dict ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", 
{0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def A__ ( self: Any ) -> float: return 1e-5 @property def A__ ( self: int ) -> int: return 12 def A__ ( self: List[str] ,lowerCamelCase_: "ProcessorMixin" ,lowerCamelCase_: int = -1 ,lowerCamelCase_: int = -1 ,lowerCamelCase_: bool = False ,lowerCamelCase_: Optional["TensorType"] = None ,lowerCamelCase_: int = 3 ,lowerCamelCase_: int = 40 ,lowerCamelCase_: int = 40 ,) -> Mapping[str, Any]: setattr(processor.image_processor ,"""apply_ocr""" ,lowerCamelCase_ ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ : List[str] = compute_effective_axis_dimension( lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ : int = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ ) UpperCAmelCase_ : int = compute_effective_axis_dimension( lowerCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowerCamelCase_ ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ : Optional[int] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes UpperCAmelCase_ : List[Any] = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) UpperCAmelCase_ : Any = self._generate_dummy_images(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = dict( processor( lowerCamelCase_ ,text=lowerCamelCase_ ,boxes=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,) ) return inputs
345
1
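Both dummy-input sizes above pass through compute_effective_axis_dimension, which resolves the export convention that -1 means "dynamic": tracing still needs concrete tensors, so a fixed fallback size is substituted and head-room for special tokens is reserved, while the exported axes stay symbolic. A sketch of what that helper does:

def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # -1 (or 0) requests a dynamic axis; fall back to the fixed default and
    # reserve room for the tokenizer's special tokens.
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add

print(compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2))  # 6
print(compute_effective_axis_dimension(16, fixed_dimension=8, num_token_to_add=2))  # 14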
from __future__ import annotations def lowerCamelCase_ ( _a : list[float] ): '''simple docstring''' if len(_a ) < 2: raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" ) if any(i <= 0 for i in nums ): raise ValueError("""All values must be greater than 0""" ) UpperCAmelCase_ : str = nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
345
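The validator is the generalized triangle inequality: after sorting, the longest side must be strictly shorter than the sum of all the others, or the sides cannot close into a polygon. Two worked cases, calling the function above under the hypothetical name check_polygon (this file's identifiers are mangled):

print(check_polygon([6.0, 10.0, 5.0]))       # True:  10 < 6 + 5
print(check_polygon([3.0, 7.0, 13.0, 2.0]))  # False: 13 >= 3 + 7 + 2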
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def lowerCamelCase_ ( _a : List[Any] ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = [ """decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_a , _a ) def lowerCamelCase_ ( _a : Any ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = emb.weight.shape UpperCAmelCase_ : Tuple = nn.Linear(_a , _a , bias=_a ) UpperCAmelCase_ : List[Any] = emb.weight.data return lin_layer def lowerCamelCase_ ( _a : Dict ): '''simple docstring''' UpperCAmelCase_ : int = torch.load(_a , map_location="""cpu""" ) UpperCAmelCase_ : Dict = Namespace(**checkpoint["""cfg"""]["""model"""] ) UpperCAmelCase_ : Optional[int] = checkpoint["""model"""] remove_ignore_keys_(_a ) UpperCAmelCase_ : str = state_dict["""decoder.embed_tokens.weight"""].shape[0] UpperCAmelCase_ : List[str] = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()} UpperCAmelCase_ : int = XGLMConfig( vocab_size=_a , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) UpperCAmelCase_ : List[str] = XGLMForCausalLM(_a ) UpperCAmelCase_ : Tuple = model.load_state_dict(_a , strict=_a ) print(_a ) UpperCAmelCase_ : Optional[Any] = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') UpperCamelCase_ = parser.parse_args() UpperCamelCase_ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
345
1
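The embedding-to-linear helper is the weight-tying step of the conversion: the resulting LM head is a bias-free linear layer whose weight matrix is the token-embedding matrix itself. A standalone sketch of that helper with readable names:

from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # share the same tensor: tied embeddings
    return lin_layer

emb = nn.Embedding(100, 16)
head = make_linear_from_emb(emb)
assert head.weight.data_ptr() == emb.weight.data_ptr()  # one storage, two views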
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class _snake_case ( unittest.TestCase , __snake_case ): '''simple docstring''' def A__ ( self: Any ) -> Tuple: UpperCAmelCase_ : int = load_tool("""text-classification""" ) self.tool.setup() UpperCAmelCase_ : List[str] = load_tool("""text-classification""" ,remote=lowerCamelCase_ ) def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.tool("""That's quite cool""" ,["""positive""", """negative"""] ) self.assertEqual(lowerCamelCase_ ,"""positive""" ) def A__ ( self: str ) -> List[str]: UpperCAmelCase_ : List[str] = self.remote_tool("""That's quite cool""" ,["""positive""", """negative"""] ) self.assertEqual(lowerCamelCase_ ,"""positive""" ) def A__ ( self: Optional[Any] ) -> int: UpperCAmelCase_ : Dict = self.tool(text="""That's quite cool""" ,labels=["""positive""", """negative"""] ) self.assertEqual(lowerCamelCase_ ,"""positive""" ) def A__ ( self: Dict ) -> Optional[Any]: UpperCAmelCase_ : Tuple = self.remote_tool(text="""That's quite cool""" ,labels=["""positive""", """negative"""] ) self.assertEqual(lowerCamelCase_ ,"""positive""" )
345
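The four tests enumerate the call matrix (local vs. remote tool, positional vs. keyword arguments), all expected to resolve to the winning label. Outside the harness, the local path is a sketch like the following (load_tool belongs to the transformers agents/tools API these tests target; the remote variant additionally needs a reachable inference endpoint):

from transformers import load_tool

classifier = load_tool("text-classification")
classifier.setup()  # loads the underlying model, mirroring the test's setUp
print(classifier("That's quite cool", ["positive", "negative"]))  # "positive"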
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : '''simple docstring''' def __init__( self: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any]=13 ,lowerCamelCase_: Optional[int]=32 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[Any]=3 ,lowerCamelCase_: int=16 ,lowerCamelCase_: Optional[Any]=[32, 64, 128] ,lowerCamelCase_: Optional[int]=[1, 2, 1] ,lowerCamelCase_: Union[str, Any]=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[str]=2.0 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: List[str]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Any=False ,lowerCamelCase_: Dict=True ,lowerCamelCase_: Union[str, Any]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: int=True ,lowerCamelCase_: Tuple=None ,lowerCamelCase_: str=True ,lowerCamelCase_: Dict=10 ,lowerCamelCase_: str=8 ,lowerCamelCase_: Union[str, Any]=["stage1", "stage2"] ,lowerCamelCase_: Optional[Any]=[1, 2] ,) -> str: UpperCAmelCase_ : List[Any] = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : str = patch_size UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : Dict = embed_dim UpperCAmelCase_ : Dict = hidden_sizes UpperCAmelCase_ : str = depths UpperCAmelCase_ : int = num_heads UpperCAmelCase_ : List[Any] = window_size UpperCAmelCase_ : Union[str, Any] = mlp_ratio UpperCAmelCase_ : int = qkv_bias UpperCAmelCase_ : List[str] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Optional[int] = drop_path_rate UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : List[Any] = use_absolute_embeddings UpperCAmelCase_ : List[Any] = patch_norm UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Optional[Any] = scope UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Optional[int] = encoder_stride UpperCAmelCase_ : Optional[int] = out_features UpperCAmelCase_ : Optional[int] = out_indices def A__ ( self: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : int = None if self.use_labels: UpperCAmelCase_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : Any = self.get_config() return config, pixel_values, labels def A__ ( self: List[Any] ) -> Tuple: return FocalNetConfig( image_size=self.image_size ,patch_size=self.patch_size 
,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: str ) -> List[str]: UpperCAmelCase_ : Optional[int] = FocalNetModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCamelCase_ ) UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]: UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] ) # verify backbone works with out_features=None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def A__ ( self: Optional[int] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Any = FocalNetForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : int = 1 UpperCAmelCase_ : List[str] = FocalNetForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any ) -> int: UpperCAmelCase_ : List[Any] = self.type_sequence_label_size UpperCAmelCase_ : int = 
FocalNetForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : List[str] = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def A__ ( self: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs UpperCAmelCase_ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[Any] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) A__ : Union[str, Any] = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) A__ : Optional[Any] = False A__ : Any = False A__ : List[str] = False A__ : Any = False A__ : Any = False def A__ ( self: List[str] ) -> Tuple: UpperCAmelCase_ : Dict = FocalNetModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ,has_text_modality=lowerCamelCase_ ) def A__ ( self: List[str] ) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self: List[str] ) -> Union[str, Any]: return def A__ ( self: str ) -> List[str]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def A__ ( self: Tuple ) -> int: UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def A__ ( self: Dict ) -> List[str]: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def A__ ( self: int ) -> int: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def A__ ( self: int ) -> Dict: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def A__ ( self: Optional[Any] ) -> Optional[Any]: pass def A__ ( self: Optional[Any] ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase_ : List[Any] = model.get_output_embeddings() 
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) ) def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : str = model_class(lowerCamelCase_ ) UpperCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Any = [*signature.parameters.keys()] UpperCAmelCase_ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Any ) -> List[str]: UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Any = outputs.hidden_states UpperCAmelCase_ : List[Any] = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # FocalNet has a different seq_length UpperCAmelCase_ : int = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) UpperCAmelCase_ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = reshaped_hidden_states[0].shape UpperCAmelCase_ : List[Any] = ( reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def A__ ( self: Any ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : str = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Union[str, Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[str] ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Any = image_size[1] + patch_size[1] - (image_size[1] % 
patch_size[1]) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) @slow def A__ ( self: Optional[int] ) -> Optional[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = _config_zero_init(lowerCamelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(config=lowerCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def A__ ( self: Optional[int] ) -> str: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def A__ ( self: List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = self.default_image_processor UpperCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCAmelCase_ : Dict = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Dict = model(**lowerCamelCase_ ) # verify the logits UpperCAmelCase_ : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() ,281 ) @require_torch class _snake_case ( __snake_case , unittest.TestCase ): '''simple docstring''' A__ : List[Any] = (FocalNetBackbone,) if is_torch_available() else () A__ : int = FocalNetConfig A__ : List[str] = False def A__ ( self: Any ) -> Optional[int]: UpperCAmelCase_ : str = FocalNetModelTester(self )
345
1
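Stripped of its assertions, the integration test at the end is the standard classification recipe: image processor in, logits out, argmax over ImageNet classes. A minimal sketch against the same microsoft/focalnet-tiny checkpoint (the image path is the COCO cats fixture the test uses):

import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()
print(pred, model.config.id2label[pred])  # 281, a tabby-cat ImageNet class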
import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class _snake_case : '''simple docstring''' def __init__( self: int ,lowerCamelCase_: Tuple ) -> List[Any]: if isinstance(lowerCamelCase_ ,lowerCamelCase_ ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden UpperCAmelCase_ : List[str] = deepcopy(lowerCamelCase_ ) elif os.path.exists(lowerCamelCase_ ): with io.open(lowerCamelCase_ ,"""r""" ,encoding="""utf-8""" ) as f: UpperCAmelCase_ : List[str] = json.load(lowerCamelCase_ ) else: try: UpperCAmelCase_ : int = baseaa.urlsafe_baadecode(lowerCamelCase_ ).decode("""utf-8""" ) UpperCAmelCase_ : Optional[Any] = json.loads(lowerCamelCase_ ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' ) UpperCAmelCase_ : int = config self.set_stage_and_offload() def A__ ( self: Union[str, Any] ) -> str: # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. UpperCAmelCase_ : Optional[int] = self.get_value("""zero_optimization.stage""" ,-1 ) # offload UpperCAmelCase_ : Optional[Any] = False if self.is_zeroa() or self.is_zeroa(): UpperCAmelCase_ : Any = set(["""cpu""", """nvme"""] ) UpperCAmelCase_ : int = set( [ self.get_value("""zero_optimization.offload_optimizer.device""" ), self.get_value("""zero_optimization.offload_param.device""" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: UpperCAmelCase_ : Optional[int] = True def A__ ( self: int ,lowerCamelCase_: Optional[Any] ) -> Dict: UpperCAmelCase_ : Optional[Any] = self.config # find the config node of interest if it exists UpperCAmelCase_ : str = ds_key_long.split(""".""" ) UpperCAmelCase_ : int = nodes.pop() for node in nodes: UpperCAmelCase_ : List[Any] = config.get(lowerCamelCase_ ) if config is None: return None, ds_key return config, ds_key def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any=None ) -> Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.find_config_node(lowerCamelCase_ ) if config is None: return default return config.get(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: int=False ) -> str: UpperCAmelCase_ : List[Any] = self.config # find the config node of interest if it exists UpperCAmelCase_ : Union[str, Any] = ds_key_long.split(""".""" ) for node in nodes: UpperCAmelCase_ : Tuple = config UpperCAmelCase_ : Dict = config.get(lowerCamelCase_ ) if config is None: if must_exist: raise ValueError(F'''Can\'t find {ds_key_long} entry in the config: {self.config}''' ) else: return # if found remove it if parent_config is not None: parent_config.pop(lowerCamelCase_ ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: Optional[Any] ) -> Any: UpperCAmelCase_ : Dict = self.get_value(lowerCamelCase_ ) return False if value is None else bool(lowerCamelCase_ ) def A__ ( self: Dict ,lowerCamelCase_: Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.get_value(lowerCamelCase_ ) return False if value is None else not 
bool(lowerCamelCase_ ) def A__ ( self: Dict ) -> int: return self._stage == 2 def A__ ( self: List[str] ) -> Union[str, Any]: return self._stage == 3 def A__ ( self: List[str] ) -> str: return self._offload class _snake_case : '''simple docstring''' def __init__( self: str ,lowerCamelCase_: Any ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = engine def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> List[str]: # runs backpropagation and handles mixed precision self.engine.backward(lowerCamelCase_ ,**lowerCamelCase_ ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class _snake_case ( __snake_case ): '''simple docstring''' def __init__( self: Dict ,lowerCamelCase_: Tuple ) -> Union[str, Any]: super().__init__(lowerCamelCase_ ,device_placement=lowerCamelCase_ ,scaler=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = hasattr(self.optimizer ,"""overflow""" ) def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[str]=None ) -> Any: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def A__ ( self: str ) -> List[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def A__ ( self: Union[str, Any] ) -> Tuple: if self.__has_overflow__: return self.optimizer.overflow return False class _snake_case ( __snake_case ): '''simple docstring''' def __init__( self: str ,lowerCamelCase_: int ,lowerCamelCase_: Optional[int] ) -> Optional[Any]: super().__init__(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: Optional[int] ) -> int: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class _snake_case : '''simple docstring''' def __init__( self: Union[str, Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple=0.0_0_1 ,lowerCamelCase_: Any=0 ,**lowerCamelCase_: str ) -> int: UpperCAmelCase_ : Union[str, Any] = params UpperCAmelCase_ : Union[str, Any] = lr UpperCAmelCase_ : str = weight_decay UpperCAmelCase_ : Optional[int] = kwargs class _snake_case : '''simple docstring''' def __init__( self: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: int=None ,lowerCamelCase_: str=0 ,**lowerCamelCase_: int ) -> int: UpperCAmelCase_ : Dict = optimizer UpperCAmelCase_ : Optional[int] = total_num_steps UpperCAmelCase_ : str = warmup_num_steps UpperCAmelCase_ : Optional[Any] = kwargs
345
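The wrapper's core is dotted-path lookup into the nested DeepSpeed JSON: split the key on ".", walk the dicts, and fall back to a default as soon as a hop is missing. A standalone sketch of that traversal:

def get_value(config: dict, ds_key_long: str, default=None):
    # Walk "a.b.c" through nested dicts; any missing hop yields the default.
    *nodes, leaf = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(leaf, default)

ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
print(get_value(ds_config, "zero_optimization.stage"))                     # 3
print(get_value(ds_config, "zero_optimization.offload_param.device"))      # cpu
print(get_value(ds_config, "zero_optimization.offload_optimizer.device"))  # None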
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : '''simple docstring''' def __init__( self: Tuple ,lowerCamelCase_: List[str] ,lowerCamelCase_: int=13 ,lowerCamelCase_: int=32 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: Any=3 ,lowerCamelCase_: str=16 ,lowerCamelCase_: Optional[Any]=[1, 2, 1] ,lowerCamelCase_: Tuple=[2, 2, 4] ,lowerCamelCase_: int=2 ,lowerCamelCase_: List[Any]=2.0 ,lowerCamelCase_: str=True ,lowerCamelCase_: Optional[int]=0.0 ,lowerCamelCase_: List[Any]=0.0 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=False ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-5 ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=None ,lowerCamelCase_: Union[str, Any]=True ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=8 ,) -> List[Any]: UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : int = image_size UpperCAmelCase_ : Union[str, Any] = patch_size UpperCAmelCase_ : Optional[Any] = num_channels UpperCAmelCase_ : int = embed_dim UpperCAmelCase_ : Union[str, Any] = depths UpperCAmelCase_ : List[str] = num_heads UpperCAmelCase_ : int = window_size UpperCAmelCase_ : List[str] = mlp_ratio UpperCAmelCase_ : Tuple = qkv_bias UpperCAmelCase_ : Tuple = hidden_dropout_prob UpperCAmelCase_ : str = attention_probs_dropout_prob UpperCAmelCase_ : Tuple = drop_path_rate UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : int = use_absolute_embeddings UpperCAmelCase_ : Any = patch_norm UpperCAmelCase_ : Optional[int] = layer_norm_eps UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Dict = scope UpperCAmelCase_ : int = use_labels UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[str] = encoder_stride def A__ ( self: Any ) -> int: UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : List[Any] = None if self.use_labels: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : str = self.get_config() return config, pixel_values, labels def A__ ( self: List[Any] ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act 
,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def A__ ( self: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: List[str] ) -> str: UpperCAmelCase_ : str = SwinvaModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def A__ ( self: List[Any] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> int: UpperCAmelCase_ : Any = SwinvaForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : str = 1 UpperCAmelCase_ : Optional[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : int = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def A__ ( self: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> int: UpperCAmelCase_ : Union[str, Any] = self.type_sequence_label_size UpperCAmelCase_ : int = SwinvaForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def A__ ( self: str ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs UpperCAmelCase_ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Tuple = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) A__ : Optional[Any] = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) A__ : List[Any] = False A__ : Tuple = False A__ : int = False A__ : Union[str, Any] = False def A__ ( self: List[str] ) -> Optional[Any]: UpperCAmelCase_ : Any = SwinvaModelTester(self ) UpperCAmelCase_ : str = ConfigTester(self ,config_class=lowerCamelCase_ ,embed_dim=37 ) def A__ ( self: Optional[int] ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self: Any ) -> Dict: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def A__ ( self: int ) -> Dict: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def A__ ( self: Tuple ) -> List[str]: pass def A__ ( self: str ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase_ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) ) def A__ ( self: Optional[Any] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCamelCase_ ) UpperCAmelCase_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : int = [*signature.parameters.keys()] UpperCAmelCase_ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Any = True for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Union[str, Any] = False UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : Optional[Any] = outputs.attentions UpperCAmelCase_ : List[str] = len(self.model_tester.depths ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase_ : str = True UpperCAmelCase_ : Optional[Any] = config.window_size**2 UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : List[Any] = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ ) # Check attention is always last and order is fine UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : List[Any] = True UpperCAmelCase_ : Tuple = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) if hasattr(self.model_tester ,"""num_hidden_states_types""" ): UpperCAmelCase_ : List[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states UpperCAmelCase_ : List[str] = 2 self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) ) UpperCAmelCase_ : Any = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, 
window_size_squared] ,) def A__ ( self: List[str] ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> List[Any]: UpperCAmelCase_ : str = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase_ : List[str] = outputs.hidden_states UpperCAmelCase_ : Optional[Any] = getattr( self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # Swinv2 has a different seq_length UpperCAmelCase_ : Optional[Any] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = reshaped_hidden_states[0].shape UpperCAmelCase_ : Optional[Any] = ( reshaped_hidden_states[0].view(lowerCamelCase_ ,lowerCamelCase_ ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def A__ ( self: Any ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCAmelCase_ : Any = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : str = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Union[str, Any] = 3 UpperCAmelCase_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : List[str] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : List[str] = True self.check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,(padded_height, padded_width) ) def A__ ( self: Optional[int] ) -> str: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def A__ ( self: Union[str, Any] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def A__ ( self: str ) -> Tuple: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Dict = SwinvaModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__ ( self: Any ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = _config_zero_init(lowerCamelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(config=lowerCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def A__ ( self: Dict ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def A__ ( self: str ) -> List[Any]: UpperCAmelCase_ : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( lowerCamelCase_ ) UpperCAmelCase_ : Any = self.default_image_processor UpperCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ) # verify the logits UpperCAmelCase_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) UpperCAmelCase_ : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
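# A small standalone sketch of the shape arithmetic the model test above relies on:
# each Swin stage after the first merges 2x2 patches, quartering the sequence length
# and doubling the channel width. Defaults mirror the tester (image_size=32,
# patch_size=2, embed_dim=16, depths=[1, 2, 1]); nothing here touches the model.
def swin_output_shape(image_size=32, patch_size=2, embed_dim=16, num_stages=3):
    seq_len = ((image_size // patch_size) ** 2) // (4 ** (num_stages - 1))
    hidden_dim = embed_dim * 2 ** (num_stages - 1)
    return seq_len, hidden_dim

assert swin_output_shape() == (16, 64)  # matches (batch_size, 16, 64) in the test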
345
1
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): '''simple docstring''' A__ : List[str] = ["image_processor", "tokenizer"] A__ : Optional[Any] = "AutoImageProcessor" A__ : Optional[Any] = "AutoTokenizer" def __init__( self: List[str] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> int: super().__init__(lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Any = self.image_processor def __call__( self: Dict ,lowerCamelCase_: int=None ,lowerCamelCase_: Optional[Any]=None ,lowerCamelCase_: Optional[Any]=None ,**lowerCamelCase_: Tuple ) -> Optional[int]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: UpperCAmelCase_ : Union[str, Any] = self.tokenizer(lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ ) if images is not None: UpperCAmelCase_ : List[str] = self.image_processor(lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ ) if text is not None and images is not None: UpperCAmelCase_ : List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase_ ) ,tensor_type=lowerCamelCase_ ) def A__ ( self: Optional[Any] ,*lowerCamelCase_: Tuple ,**lowerCamelCase_: int ) -> Dict: return self.tokenizer.batch_decode(*lowerCamelCase_ ,**lowerCamelCase_ ) def A__ ( self: List[Any] ,*lowerCamelCase_: Optional[int] ,**lowerCamelCase_: List[str] ) -> str: return self.tokenizer.decode(*lowerCamelCase_ ,**lowerCamelCase_ ) @property def A__ ( self: Tuple ) -> Optional[int]: return ["input_ids", "attention_mask", "pixel_values"]
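# Hedged usage sketch for the processor above, with a hypothetical checkpoint name:
# text is routed to the tokenizer, images to the image processor, and the image
# pixel_values are merged into the text encoding when both are given.
#
#   processor = SomeVisionTextProcessor.from_pretrained("org/vision-text-checkpoint")
#   batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#   # batch now holds input_ids, attention_mask and pixel_values, matching
#   # model_input_names above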
345
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED UpperCamelCase_ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } UpperCamelCase_ = { '''allenai/led-base-16384''': 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : int = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCAmelCase_ : Dict = bs[:] UpperCAmelCase_ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(_a ) cs.append(2**8 + n ) n += 1 UpperCAmelCase_ : Any = [chr(_a ) for n in cs] return dict(zip(_a , _a ) ) def lowerCamelCase_ ( _a : List[str] ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = set() UpperCAmelCase_ : List[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase_ : Optional[int] = char return pairs class _snake_case ( __snake_case ): '''simple docstring''' A__ : str = VOCAB_FILES_NAMES A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Optional[int] = ["input_ids", "attention_mask"] def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any: UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,) with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle: UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : Any = errors # how to handle errors in decoding UpperCAmelCase_ : int = bytes_to_unicode() UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle: UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1] UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges] UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) ) UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def A__ ( self: List[str] ) -> List[str]: return len(self.encoder ) def A__ ( self: Any ) -> Union[str, Any]: return dict(self.encoder ,**self.added_tokens_encoder ) def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]: if token in self.cache: return self.cache[token] UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : List[str] = 0 while i < len(lowerCamelCase_ ): try: UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase_ : Union[str, Any] = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ ) UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = word return word def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]: UpperCAmelCase_ : str = [] for token in re.findall(self.pat ,lowerCamelCase_ ): UpperCAmelCase_ : List[Any] = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]: return 
self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) ) def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]: return self.decoder.get(lowerCamelCase_ ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]: UpperCAmelCase_ : str = """""".join(lowerCamelCase_ ) UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : List[Any] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase_ : List[str] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCAmelCase_ : str = 0 with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) UpperCAmelCase_ : Tuple = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] UpperCAmelCase_ : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Optional[Any] = [self.sep_token_id] UpperCAmelCase_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCAmelCase_ : Dict = """ """ + text return (text, kwargs) def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_:
Optional[bool] = None ,) -> dict: UpperCAmelCase_ : Optional[int] = super()._pad( encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,) # Load from model defaults if return_attention_mask is None: UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCAmelCase_ : str = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
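# A pure-Python sketch of the global-attention padding rule implemented in _pad
# above: LED marks globally-attended positions with 1 and local ones with 0, so
# padded slots get -1 ("not attended at all") rather than 0.
def pad_global_attention_mask(mask, target_len, padding_side="right"):
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0, 0], 5, "left") == [-1, -1, 1, 0, 0]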
345
1
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class _snake_case ( unittest.TestCase ): '''simple docstring''' @slow def A__ ( self: Optional[Any] ) -> Tuple: UpperCAmelCase_ : Tuple = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" ) UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" ) UpperCAmelCase_ : Union[str, Any] = tokenizer("""Hello there""" ,return_tensors="""np""" ).input_ids UpperCAmelCase_ : Union[str, Any] = tokenizer("""Hi I am""" ,return_tensors="""np""" ).input_ids UpperCAmelCase_ : str = shift_tokens_right(lowerCamelCase_ ,model.config.pad_token_id ,model.config.decoder_start_token_id ) UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,decoder_input_ids=lowerCamelCase_ ).logits UpperCAmelCase_ : int = optax.softmax_cross_entropy(lowerCamelCase_ ,onehot(lowerCamelCase_ ,logits.shape[-1] ) ).mean() UpperCAmelCase_ : List[str] = -(labels.shape[-1] * loss.item()) UpperCAmelCase_ : Optional[Any] = -8_4.9_1_2_7 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
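# A NumPy approximation of the shift_tokens_right helper exercised above: labels are
# shifted one position right, decoder_start_token_id is prepended, and any remaining
# -100 ignore-index entries are replaced by the pad token, so the decoder predicts
# token t from tokens < t. Sketch only; the Flax original operates on jnp arrays.
import numpy as np

def shift_right(labels, pad_token_id, decoder_start_token_id):
    shifted = np.roll(labels, 1, axis=-1)  # np.roll returns a copy, safe to mutate
    shifted[..., 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)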
345
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Union[str, Any] ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self: List[str] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : Optional[Any] = (32, 32) UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ ) return image @property def A__ ( self: List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : int = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) return model @property def A__ ( self: str ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) return model @property def A__ ( self: Optional[int] ) -> int: torch.manual_seed(0 ) UpperCAmelCase_ : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModel(lowerCamelCase_ ) @property def A__ ( self: Tuple ) -> Tuple: def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ): class _snake_case : '''simple docstring''' def __init__( self: List[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = torch.ones([0] ) def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int: self.pixel_values.to(lowerCamelCase_ ) return self return Out() return extract def A__ ( self: Union[str, Any] ) -> Tuple: UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : int = self.dummy_cond_unet UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : str = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : str = 
torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : List[Any] = output.images UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Dict = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : int = image[0, -3:, -3:, -1] UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Dict = self.dummy_cond_unet UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : str = self.dummy_vae UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCAmelCase_ : int = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0] UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: str ) -> Dict: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ ) assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert isinstance(pipe.scheduler ,lowerCamelCase_ ) assert pipe.safety_checker is None UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ ) # sanity 
check that the pipeline still works assert pipe.safety_checker is None UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : Tuple = self.dummy_cond_unet UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) # put models in fp16 UpperCAmelCase_ : Optional[Any] = unet.half() UpperCAmelCase_ : Optional[int] = vae.half() UpperCAmelCase_ : int = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Any = StableDiffusionPipeline( unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,) UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger""" UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self: List[str] ) -> List[Any]: UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : str = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) UpperCAmelCase_ : Optional[int] = 4003660346 UpperCAmelCase_ : int = 7 # without safety guidance (sld_guidance_scale = 0) UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Optional[int] = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # with safety guidance (strong configuration) UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Any = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ ) UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity""" UpperCAmelCase_ : List[Any] = 2734971755 UpperCAmelCase_ : Optional[Any] = 7 UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Tuple = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : Dict = output.images UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A__ ( self: Union[str, Any] ) -> int: UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ) UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCAmelCase_ : Any = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.""" """ leyendecker""" ) UpperCAmelCase_ : Optional[Any] = 1044355234 UpperCAmelCase_ : List[str] = 12 UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,) UpperCAmelCase_ : Any = output.images UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = sd_pipe( [prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,) UpperCAmelCase_ : List[str] = output.images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
345
1