import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item that is already queued
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
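
# A minimal usage sketch (illustrative only, not part of the search code):
# put() inserts a new item or re-prioritises an existing one, and get() pops
# the entry with the smallest priority.
_demo_queue = PriorityQueue()
_demo_queue.put((0, 0), 3.0)
_demo_queue.put((1, 1), 1.0)
_demo_queue.put((0, 0), 0.5)  # a second put() on the same item updates its priority
assert _demo_queue.get() == (0.5, (0, 0))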
def consistent_heuristic(P: TPos, goal: TPos) -> float:
    # Euclidean distance between the point and the goal
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(P: TPos, goal: TPos) -> float:
    # inconsistent heuristic: the consistent one, scaled down by the global counter t
    return consistent_heuristic(P, goal) // t


def heuristic_2(P: TPos, goal: TPos) -> float:
    # Manhattan distance between the point and the goal
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]) -> float:
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
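
# Worked example of the key() formula f = g + W1 * h (illustrative numbers):
# with W1 = 1, g((0, 0)) = 0 and the Manhattan heuristic (i = 2) towards the
# goal (2, 2), h = |0 - 2| + |0 - 2| = 4, so key((0, 0), 2, (2, 2), {(0, 0): 0}) = 4.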
def do_something(back_pointer: dict[TPos, TPos], goal: TPos, start: TPos) -> None:
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s: TPos,
    j: int,
    visited: set[TPos],
    g_function: dict[TPos, float],
    close_list_anchor: list[TPos],
    close_list_inad: list[TPos],
    open_list: list[PriorityQueue],
    back_pointer: dict[TPos, TPos],
) -> None:
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground() -> list[TPos]:
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[TPos] = []
    close_list_inad: list[TPos] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)

import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
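
# Example invocation (hypothetical local paths, shown for illustration only):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod_base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-converted
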
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output
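
# A minimal sanity check (illustrative only): both implementations should
# produce the same set of permutations, up to ordering.
assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))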
if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()

import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor

if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
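
# A standalone round-trip check (illustrative only) of the min-max scaling that
# scale_features()/scale_to_features() above implement, using the same min/max
# constants as in __init__.
_min_v, _max_v = math.log(1e-5), 4.0
_x = torch.tensor([-11.5, -4.0, 3.5])
_scaled = (_x - _min_v) / (_max_v - _min_v) * 2.0 - 1.0  # scale to [-1, 1]
_back = (_scaled + 1.0) / 2.0 * (_max_v - _min_v) + _min_v  # and back again
assert torch.allclose(_x, _back)
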
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
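
# A quick structural check (illustrative only): each schedule above is a
# strictly decreasing sequence of timesteps that ends at 0.
for _schedule in [
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
]:
    assert all(a > b for a, b in zip(_schedule, _schedule[1:]))
    assert _schedule[-1] == 0
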
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
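
# A minimal numpy sketch (illustrative only) of the rescale + normalize steps the
# preprocess() pipeline above applies per channel: pixel * (1/255), then
# (pixel - mean) / std, with the ImageNet-standard constants imported above.
_img = np.full((3, 2, 2), 128, dtype=np.uint8)  # a dummy 3-channel image
_rescaled = _img * (1 / 255)
_mean = np.array(IMAGENET_STANDARD_MEAN)[:, None, None]
_std = np.array(IMAGENET_STANDARD_STD)[:, None, None]
_normalized = (_rescaled - _mean) / _std
assert _normalized.shape == (3, 2, 2)
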
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # compare two initializers while temporarily ignoring their names
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
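
# A toy check (illustrative only) of the duplicate-detection idea above: two
# initializers count as duplicates when they are identical except for their name.
_t1 = onnx.helper.make_tensor("a", onnx.TensorProto.FLOAT, [2], [1.0, 2.0])
_t2 = onnx.helper.make_tensor("b", onnx.TensorProto.FLOAT, [2], [1.0, 2.0])
assert _is_equal_tensor_proto(_t1, _t2)
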
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    # field names recovered from the keyword arguments used in `step`/`step_correct` below
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # no input scaling is needed for this scheduler
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        # "churn" the sample up to a slightly higher noise level before the model evaluation
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        # first-order (Euler) step from sigma_hat down to sigma_prev
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        # second-order (Heun-style) correction using the model output at sigma_prev
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
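# --- Editor's illustration (not in the original source) ---
# A minimal sketch of how this scheduler is meant to be driven, assuming the
# repaired class above and a hypothetical `model(x, sigma)` denoiser; left as
# comments since `model` is not defined in this module.
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for i in range(len(scheduler.schedule)):
#       sigma = scheduler.schedule[i]
#       sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0.0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       out = scheduler.step(model(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:
#           # second-order correction, as in Algorithm 2 of Karras et al. (2022)
#           out = scheduler.step_correct(
#               model(out.prev_sample, sigma_prev), sigma_hat, sigma_prev,
#               sample_hat, out.prev_sample, out.derivative,
#           )
#       sample = out.prev_sample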
| 644 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        # `sql` and `con` are handled by this class, so they must not leak into pandas' to_sql kwargs
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
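# --- Editor's illustration (not in the original source) ---
# A minimal round-trip sketch, assuming the repaired classes above; the table
# name and database path are hypothetical. Left as comments so the module's
# import-time behavior is unchanged.
#
#   import sqlite3
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   con = sqlite3.connect("data.db")
#   SqlDatasetWriter(ds, "my_table", con).write()   # Arrow -> SQL, batch by batch
#   ds2 = SqlDatasetReader("SELECT * FROM my_table", "sqlite:///data.db").read()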
| 37 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
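# --- Editor's illustration (not in the original source) ---
# A minimal forward-pass sketch with the default config, assuming the repaired
# class above; with the single default down/up block the spatial size should be
# preserved, so the shapes below are indicative only.
#
#   model = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   latents = model.encode(x).latents      # continuous pre-quantization latents
#   recon = model.decode(latents).sample   # codebook lookup + decoder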
| 440 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        # move any user config out of the way for the duration of the test class
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)
    def test_cli(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 440 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 151 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility, delegate to the active processor inside `as_target_processor`
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
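# --- Editor's illustration (not in the original source) ---
# Typical CTC usage, assuming the repaired class above; the checkpoint name is
# only an example. Left as comments since `raw_audio` is not defined here.
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = processor(audio=raw_audio, sampling_rate=16_000, text="HELLO WORLD")
#   # batch["input_values"] come from the feature extractor and
#   # batch["labels"] from the tokenizer, produced in a single call.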
| 176 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
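# --- Editor's illustration (not in the original source) ---
# Sketch of the single-call API, assuming the repaired class above; the model
# id is only an example. Left as comments since `image` is not defined here.
#
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="Question: what is shown? Answer:",
#                      return_tensors="pt")
#   # -> pixel_values from the image processor merged with the tokenizer's
#   #    input_ids / attention_mask in one BatchEncoding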
| 716 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
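# --- Editor's illustration (not in the original source) ---
# This module is a pure re-export surface, so downstream code imports from the
# package rather than from the individual submodules, e.g.:
#
#   from accelerate.utils import set_seed, send_to_device
#   set_seed(42)
#   batch = send_to_device(batch, "cuda:0")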
| 198 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # loading an old serialization format must fail with an explicit error
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 442 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        # warm up the cache so the training test does not include download time
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)
        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # else the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 442 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # the original checks compared each dimension against itself, which is vacuous; the evident
        # intent is divisibility by the corresponding head width
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
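# --- Editor's illustration (not in the original source) ---
# Sketch of the nested-config behavior, assuming the repaired classes above:
#
#   cfg = EsmConfig(vocab_size=33, is_folding_model=True,
#                   esmfold_config={"trunk": {"sequence_state_dim": 1024}})
#   cfg.to_dict()                        # EsmFoldConfig/TrunkConfig serialized recursively
#   TrunkConfig(sequence_state_dim=100)  # raises: 100 is not a multiple of head width 32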
| 320 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
# Path to the transformers sources (repo-relative, as in the upstream script).
PATH_TO_TRANSFORMERS = "src/transformers"


def get_transformers_submodules():
    """Return the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
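

# A submodule counts as "registered" when its name appears as a key of
# `_import_structure` in the main __init__, either directly or via an
# `import_structure["..."]` addition inside an optional-dependency branch.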
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check that every submodule is properly registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian probability density at x for mean mu and standard deviation sigma."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
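    # Quick sanity check (illustrative): the standard normal density peaks at
    # 1 / sqrt(2 * pi) ~= 0.3989 when x == mu.
    print(gaussian(0.0))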
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed (m/s) for a gas at `temperature` (K) with `molar_mass` (kg/mol)."""
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
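

# Derivation: for one mole of an ideal gas the mean translational kinetic energy is
# (3/2) * R * T and equals (1/2) * M * v_rms**2, hence v_rms = sqrt(3 * R * T / M).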
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300  # kelvin
    molar_mass = 0.028  # kg/mol for diatomic nitrogen (N2); the function expects kg/mol
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32)
        )
        torch.manual_seed(0)
        # clip_sample/set_alpha_to_one values are the usual ones for these fast tests (assumed).
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
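
    # Note: every component above is a tiny, randomly initialized stand-in, so these
    # fast tests can run the full pipeline end to end on CPU in seconds.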
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32)
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32)
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"
    def __init__(
        self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NB: the misspelled kwarg key matches the upstream config (kept for compatibility).
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
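
    # Illustrative: OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    # passes this validation, while {"type": "cubic", "factor": 2.0} raises a ValueError.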
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
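
    # Illustrative usage (values here are just the defaults restated):
    #     config = ASTConfig(patch_size=16, frequency_stride=10, time_stride=10)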
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size,
            )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sort the first n elements of collection in place."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Insert collection[index] into the sorted prefix collection[:index]."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
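

# Example: rec_insertion_sort([3, 1, 2], 3) sorts the list in place to [1, 2, 3];
# insert_next bubbles each out-of-place element forward one swap at a time.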
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info
def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
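
# Keys are submodule names, values the public symbols they export; _LazyModule uses
# this mapping (extended below per available backend) to defer the actual imports
# until an attribute is first accessed.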
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
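
    # In the inpainting inputs above, `image` is the original picture and
    # `mask_image` marks the region the pipeline should repaint.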
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Disable gradients for all parameters of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
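

# Example: get_timestamp() returns the current local time as "HH:MM:SS",
# e.g. "14:05:09", convenient for tagging generated images or log lines.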
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
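
# The fixtures below materialize this dataset (and FILE_CONTENT) in many on-disk
# formats - arrow, csv, sqlite, parquet, json(l), and various compressed archives -
# so the loader tests can exercise every supported input format.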
@pytest.fixture(scope="session" )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session" )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session" )
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session" )
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session" )
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session" )
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope="session" )
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session" )
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session" )
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session" )
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session" )
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
    with open(filename, "w") as f:
        f.write(data)
return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="session" )
def data_dict_of_lists():
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="session" )
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session" )
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session" )
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session" )
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session" )
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path
@pytest.fixture(scope="session" )
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path
@pytest.fixture(scope="session" )
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        } )
    with open(path , "wb" ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
    return path
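# Illustration only: a quick round-trip check for the Parquet file, reusing the
# pq (pyarrow.parquet) alias already used by the fixture above.
def _parquet_num_rows(path):
    return pq.read_table(path).num_rows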
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
    data = {"data": DATA}
    with open(path , "w" ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path , "w" ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
    with open(path , "w" ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + "\n" )
    return path
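# Illustration only: JSON Lines files hold one JSON object per line, so reading
# them back is a plain loop over the file.
def _read_jsonl(path):
    with open(path) as f:
        return [json.loads(line) for line in f]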
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
    with open(path , "w" ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
    with open(path , "w" ) as f:
        for item in DATA_312:
            f.write(json.dumps(item ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
    with open(path , "w" ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory , text_path ):
    """simple docstring"""
    import gzip
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
    with open(text_path , "rb" ) as orig_file:
        with gzip.open(path , "wb" ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory , jsonl_path ):
    """simple docstring"""
    import gzip
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
    with open(jsonl_path , "rb" ) as orig_file:
        with gzip.open(path , "wb" ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
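# Illustration only: the gzip fixtures compress an existing file byte-for-byte,
# so decompressing should return exactly the original file contents.
def _gunzip_bytes(path):
    import gzip
    with gzip.open(path, "rb") as f:
        return f.read()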
@pytest.fixture(scope="session" )
def __A ( jsonl_path , jsonla_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.write(jsonla_path , arcname=os.path.basename(jsonla_path ) )
    return path
@pytest.fixture(scope="session" )
def __A ( zip_jsonl_path , jsonl_path , jsonla_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(zip_jsonl_path , arcname=os.path.join("nested" , os.path.basename(zip_jsonl_path ) ) )
    return path
@pytest.fixture(scope="session" )
def __A ( jsonl_path , jsonla_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(jsonl_path , arcname=os.path.join("main_dir" , os.path.basename(jsonl_path ) ) )
        f.write(jsonla_path , arcname=os.path.join("main_dir" , os.path.basename(jsonla_path ) ) )
    return path
@pytest.fixture(scope="session" )
def __A ( jsonl_path , jsonla_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
    with tarfile.TarFile(path , "w" ) as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.add(jsonla_path , arcname=os.path.basename(jsonla_path ) )
    return path
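# Illustration only: tarfile.TarFile defaults to an uncompressed archive, which
# matches the plain ".tar" suffix used above; member names mirror the arcnames.
def _tar_members(path):
    with tarfile.open(path) as tf:
        return tf.getnames()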
@pytest.fixture(scope="session" )
def __A ( tar_jsonl_path , jsonl_path , jsonla_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path , "w" ) as f:
        f.add(tar_jsonl_path , arcname=os.path.join("nested" , os.path.basename(tar_jsonl_path ) ) )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
    with open(path , "w" ) as f:
        for item in data:
            f.write(item + "\n" )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
    with open(path , "w" ) as f:
        for item in data:
            f.write(item + "\n" )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data" ) / "dataset.abc"
    with open(path , "w" ) as f:
        for item in data:
            f.write(item + "\n" )
    return path
@pytest.fixture(scope="session" )
def __A ( text_path , texta_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(text_path , arcname=os.path.basename(text_path ) )
        f.write(texta_path , arcname=os.path.basename(texta_path ) )
    return path
@pytest.fixture(scope="session" )
def __A ( text_path , texta_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(text_path , arcname=os.path.join("main_dir" , os.path.basename(text_path ) ) )
        f.write(texta_path , arcname=os.path.join("main_dir" , os.path.basename(texta_path ) ) )
    return path
@pytest.fixture(scope="session" )
def __A ( text_path , texta_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(text_path , arcname=os.path.basename("unsupported.ext" ) )
        f.write(texta_path , arcname=os.path.basename("unsupported_2.ext" ) )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
    path = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(text )
    return path
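# Note: "\u2029" is the Unicode paragraph separator. str.splitlines() breaks on
# it while splitting on "\n" alone does not, which is exactly the edge case the
# file above lets the text loaders exercise.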
@pytest.fixture(scope="session" )
def __A ( ):
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __A ( ):
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __A ( image_file , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
    with zipfile.ZipFile(path , "w" ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace(".jpg" , "2.jpg" ) )
    return path
@pytest.fixture(scope="session" )
def __A ( tmp_path_factory ):
    """simple docstring"""
    data_dir = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
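# Illustration only: a test could assert that loaders skip the hidden entries
# created above by globbing only the visible text files.
def _visible_txt_files(data_dir):
    return sorted(
        p
        for p in data_dir.rglob("*.txt")
        if not any(part.startswith(".") for part in p.relative_to(data_dir).parts)
    )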
| 197 | 0 |
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = """roberta"""
elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = """transformer"""
    state_dict = model.state_dict()
    compressed_sd = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowercase = state_dict[f'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowercase = f'{prefix}.embeddings.{w}.weight'
lowercase = state_dict[param_name]
for w in ["weight", "bias"]:
lowercase = f'{prefix}.embeddings.LayerNorm.{w}'
lowercase = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
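    # Teacher layers 0, 2, 4, 7, 9 and 11 are copied into consecutive student
    # slots (std_idx 0-5), so the extracted student is six layers deep.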
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowercase = state_dict[
f'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
lowercase = state_dict[f'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowercase = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowercase = state_dict[f'{layer}']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowercase = state_dict[f'lm_head.dense.{w}']
lowercase = state_dict[f'lm_head.layer_norm.{w}']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowercase = state_dict[f'{prefix}.ln_f.{w}']
lowercase = state_dict["""lm_head.weight"""]
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
    torch.save(compressed_sd, args.dump_checkpoint)
 | 591 | 0 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __lowercase ( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def A_ ( self : Optional[Any] ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def A_ ( self : Optional[Any] , device : Optional[int] , seed : Optional[Any]=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
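    # Note: get_dummy_inputs above builds a 64x64 control image (32 * the
    # embedder scale factor of 2) plus a matching 64x64 RGB input image, kept
    # deliberately tiny so the fast tests stay cheap.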
def A_ ( self : Union[str, Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A_ ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def A_ ( self : Any ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __lowercase ( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def A_ ( self : Tuple ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m : List[str] ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def A_ ( self : Tuple , device : Dict , seed : Optional[int]=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def A_ ( self : str ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
def A_ ( self : int ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A_ ( self : int ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def A_ ( self : int ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
    def A_ ( self : Dict ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def A_ ( self : Dict ):
        controlnet = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        prompt = '''evil space-punk bird'''
        control_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
        image = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
        assert np.abs(expected_image - image ).max() < 9E-2
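        # The 9e-2 tolerance is deliberately loose: full-pipeline outputs drift
        # slightly across GPU kernels and library versions.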
| 591 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}
for w in ["word_embeddings", "position_embeddings"]:
__A = state_dict[f'{prefix}.embeddings.{w}.weight']
for w in ["weight", "bias"]:
__A = state_dict[f'{prefix}.embeddings.LayerNorm.{w}']
    std_idx = 0
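    # As in the RoBERTa/GPT-2 extraction script, teacher layers 0, 2, 4, 7, 9
    # and 11 are mapped onto consecutive student layers.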
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__A = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
]
__A = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
]
__A = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
]
__A = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
]
__A = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
]
__A = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
]
__A = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
]
__A = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
]
std_idx += 1
__A = state_dict['cls.predictions.decoder.weight']
__A = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
__A = state_dict[f'cls.predictions.transform.dense.{w}']
__A = state_dict[f'cls.predictions.transform.LayerNorm.{w}']
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
| 484 |
def infix_2_postfix( infix ):
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ) , '''Stack'''.center(print_width ) , '''Postfix'''.center(print_width ) , sep=''' | ''' , )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ''' '''.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def infix_2_prefix( infix ):
    '''simple docstring'''
    infix_list = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(infix_list ) ):
        if infix_list[i] == "(":
            infix_list[i] = ''')'''  # change "(" to ")"
        elif infix_list[i] == ")":
            infix_list[i] = '''('''  # change ")" to "("
    return (infix_2_postfix(''''''.join(infix_list ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
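# Illustration: prefix conversion reverses the infix string (swapping the
# parentheses), converts to postfix, then reverses again; e.g. "a+b*c" reversed
# is "c*b+a", whose postfix is "cb*a+", which reversed gives the prefix "+a*bc".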
if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 484 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/esm2_t6_8M_UR50D': 1024,
'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file : str):
    """simple docstring"""
    with open(vocab_file , '''r''') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class lowerCamelCase__ ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ) -> Dict:
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> str:
return self._id_to_token.get(__lowerCamelCase , self.unk_token )
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> int:
return self._token_to_id.get(__lowerCamelCase , self._token_to_id.get(self.unk_token ) )
def _lowerCamelCase ( self , UpperCAmelCase__ , **UpperCAmelCase__ ) -> Union[str, Any]:
return text.split()
def _lowerCamelCase ( self , UpperCAmelCase__=False ) -> Optional[int]:
return len(self._id_to_token )
def _lowerCamelCase ( self ) -> str:
return {token: i for i, token in enumerate(self.all_tokens )}
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> int:
return self._token_to_id.get(__lowerCamelCase , self._token_to_id.get(self.unk_token ) )
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> str:
return self._id_to_token.get(__lowerCamelCase , self.unk_token )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]:
_A : Optional[int] = [self.cls_token_id]
_A : Tuple = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
_A : Tuple = [1] + ([0] * len(__lowerCamelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(__lowerCamelCase ) + [1]
return mask
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple:
_A : Union[str, Any] = os.path.join(__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(__lowerCamelCase , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def _lowerCamelCase ( self ) -> int:
return self.get_vocab_size(with_added_tokens=__lowerCamelCase )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ = False ) -> int:
return super()._add_tokens(__lowerCamelCase , special_tokens=__lowerCamelCase )
| 702 |
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=1_3 , UpperCAmelCase__=7 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=9_9 , UpperCAmelCase__=3_2 , UpperCAmelCase__=5 , UpperCAmelCase__=4 , UpperCAmelCase__=4 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.1 , UpperCAmelCase__=True , UpperCAmelCase__=5_1_2 , UpperCAmelCase__=1_6 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=3 , UpperCAmelCase__=4 , UpperCAmelCase__=None , ) -> int:
_A : Dict = parent
_A : Tuple = batch_size
_A : Optional[Any] = seq_length
_A : Dict = is_training
_A : Tuple = use_input_mask
_A : int = use_token_type_ids
_A : Tuple = use_labels
_A : Optional[int] = vocab_size
_A : List[Any] = hidden_size
_A : List[Any] = num_hidden_layers
_A : Union[str, Any] = num_attention_heads
_A : int = intermediate_multiple_size
_A : Optional[int] = hidden_act
_A : Any = hidden_dropout
_A : Union[str, Any] = attention_dropout
_A : Tuple = weight_tying
_A : Dict = max_position_embeddings
_A : Tuple = type_vocab_size
_A : Optional[int] = type_sequence_label_size
_A : str = initializer_range
_A : str = num_labels
_A : int = num_choices
_A : Optional[int] = scope
def _lowerCamelCase ( self ) -> Dict:
_A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Any = None
if self.use_input_mask:
_A : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_A : List[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : int = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self ) -> Optional[int]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self ) -> List[Any]:
_A , _A , _A , _A : Optional[Any] = self.prepare_config_and_inputs()
_A : List[Any] = True
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
_A : Dict = GPTNeoXJapaneseModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A : Tuple = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
_A : Optional[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
_A : int = True
_A : Tuple = GPTNeoXJapaneseModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
_A : int = GPTNeoXJapaneseForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A : List[Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]:
_A : str = True
_A : List[str] = GPTNeoXJapaneseForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
_A : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
_A : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_A : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_A : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_A : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
_A : Dict = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
_A : str = output_from_no_past['''hidden_states'''][0]
_A : Optional[int] = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
# select random slice
_A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A : int = output_from_no_past[:, -3:, random_slice_idx].detach()
_A : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
def _lowerCamelCase ( self ) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A , _A : Tuple = config_and_inputs
_A : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
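# Illustration only: the tester above is driven from the test case below roughly as
#     config, input_ids, input_mask, token_labels = tester.prepare_config_and_inputs()
#     tester.create_and_check_model(config, input_ids, input_mask)
# i.e. a config plus random tensors go in, and output shapes are asserted.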
@require_torch
class lowerCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def _lowerCamelCase ( self ) -> List[str]:
        self.model_tester = GPTNeoXJapaneseModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXJapaneseConfig , hidden_size=3_7 )
def _lowerCamelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> Tuple:
_A , _A , _A , _A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Dict:
_A , _A , _A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[int]:
# This regression test was failing with PyTorch < 1.3
_A , _A , _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
_A : Tuple = None
self.model_tester.create_and_check_model_as_decoder(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[int]:
_A , _A , _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase__ )
@slow
def _lowerCamelCase ( self ) -> Union[str, Any]:
_A : Union[str, Any] = '''abeja/gpt-neox-japanese-2.7b'''
_A : Optional[Any] = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
_A : List[str] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
_A : int = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCAmelCase__ )
_A : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCAmelCase__ )
_A : Union[str, Any] = []
for prompt in prompts:
_A : int = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' ).input_ids
_A : Optional[int] = model.generate(UpperCAmelCase__ , max_length=5_0 )
_A : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 417 | 0 |
def count_divisors(n):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
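# Illustration: count_divisors uses the identity d(p1^a1 * ... * pk^ak) =
# (a1 + 1) * ... * (ak + 1); e.g. 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors.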
def solution():
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
    print(solution()) | 25 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer,weight,bias=None ):
    '''simple docstring'''
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights,torch_layer,hidden_size ) -> Dict:
    '''simple docstring'''
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key,torch.tensor(np_query_key ).transpose(1,2 ).contiguous().view(-1,hidden_size ),)
    set_param(
        torch_layer.self_attention.value,torch.tensor(np_value ).transpose(1,2 ).contiguous().view(-1,hidden_size ),)
    set_param(
        torch_layer.output.dense,torch.tensor(np_dense ).view(-1,hidden_size ).contiguous().transpose(0,1 ),)
def set_layer_weights_in_torch_local( weights,torch_layer,hidden_size ) -> Dict:
    '''simple docstring'''
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query,torch.tensor(np_query ).transpose(1,2 ).contiguous().view(-1,hidden_size ),)
    set_param(
        torch_layer.self_attention.key,torch.tensor(np_key ).transpose(1,2 ).contiguous().view(-1,hidden_size ),)
    set_param(
        torch_layer.self_attention.value,torch.tensor(np_value ).transpose(1,2 ).contiguous().view(-1,hidden_size ),)
    set_param(
        torch_layer.output.dense,torch.tensor(np_dense ).view(-1,hidden_size ).contiguous().transpose(0,1 ),)
def set_block_weights_in_torch( weights,torch_block,hidden_size ) -> Optional[Any]:
    '''simple docstring'''
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm,torch.tensor(layer_norm_1_weight ),torch.tensor(layer_norm_1_bias ),)
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights,torch_block.attention,hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights,torch_block.attention,hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm,torch.tensor(layer_norm_2_weight ),torch.tensor(layer_norm_2_bias ),)
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense,torch.tensor(inter_dense_weight ).transpose(0,1 ).contiguous(),torch.tensor(inter_dense_bias ),)
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense,torch.tensor(out_dense_weight ).transpose(0,1 ).contiguous(),torch.tensor(out_dense_bias ),)
def set_model_weights_in_torch( weights,torch_model,hidden_size ) -> List[Any]:
    '''simple docstring'''
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings,torch.tensor(word_embeddings ),)
    if isinstance(weights[3],tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights,layer,hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm,torch.tensor(layer_norm_out_weight ),torch.tensor(layer_norm_out_bias ),)
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder,torch.tensor(output_embed_weights ).transpose(0,1 ).contiguous(),torch.tensor(output_embed_bias ),)
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path,config_file,pytorch_dump_path ):
    '''simple docstring'''
    config = ReformerConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path,'rb' ) as f:
        model_weights = pickle.load(f )['weights']
    set_model_weights_in_torch(model_weights,model,config.hidden_size )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict(),pytorch_dump_path )
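# Illustration only: a typical invocation (script and file names are placeholders):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin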
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 232 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate( model_type , generator_name_or_path , question_encoder_name_or_path , dest_dir , config_name_or_path = None , generator_tokenizer_name_or_path = None , question_encoder_tokenizer_name_or_path = None , ) -> str:
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
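# Illustration only: a typical invocation (model identifiers are examples):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-checkpoint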
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 716 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key( name : str ) -> str:
    """simple docstring"""
    if "cls_token" in name:
        name = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
    if "mask_token" in name:
        name = name.replace("""mask_token""" , """decoder.mask_token""" )
    if "decoder_pos_embed" in name:
        name = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
    if "decoder_blocks" in name:
        name = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """vit.encoder.layer""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "decoder_embed" in name:
        name = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
    if "decoder_norm" in name:
        name = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
    if "decoder_pred" in name:
        name = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
    return name
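# Illustration: rename_key maps checkpoint names onto the transformers layout,
# e.g. "blocks.0.attn.proj.weight" becomes
# "vit.encoder.layer.0.attention.output.dense.weight".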
def convert_state_dict( orig_state_dict , config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = """decoder.decoder_layers."""
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = """vit.encoder.layer."""
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_vit_mae_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
    elif "huge" in checkpoint_url:
        config.patch_size = 1_4
        config.hidden_size = 1_2_8_0
        config.intermediate_size = 5_1_2_0
        config.num_hidden_layers = 3_2
        config.num_attention_heads = 1_6
    model = ViTMAEForPreTraining(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""model"""]
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    url = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
    else:
        expected_slice = torch.tensor(
            [[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1E-4 )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
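# Illustration only: a typical invocation (script and output names are placeholders):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base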
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 199 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _lowercase :
'''simple docstring'''
    feature_extraction_class = None
    def test_feat_extract_to_json_string( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_init_without_params( self ):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract ) | 613 |
def ugly_numbers( lowercase ):
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , lowercase ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''') | 613 | 1 |
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers( remaining_length , remainder , digits , length ) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result
    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result
def lowercase ( __UpperCamelCase = 9 ) -> int:
__magic_name__ = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(__UpperCamelCase , 0 , [0] * length , __UpperCamelCase )
return result
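
# Sanity check (a sketch): brute force the count below one thousand. Project
# Euler 145 states there are exactly 120 reversible numbers below 1000, which
# should match solution(3). The helper name below is illustrative, not part of
# the original module.
def _brute_force_reversible_below(limit: int) -> int:
    count = 0
    for n in range(1, limit):
        if n % 10 == 0:
            continue  # reversing would produce a leading zero
        if all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1]))):
            count += 1
    return count


# _brute_force_reversible_below(1000) == solution(3) == 120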
if __name__ == "__main__":
print(f"""{solution() = }""")
| 190 |
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    def __init__(self):
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self):
        return self.elements

    def __repr__(self):
        return str(self.heap)

    def is_empty(self):
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem, weight):
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self):
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem, weight):
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem):
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem):
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos, node2_pos):
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self):
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self):
        return str(self.connections)

    def __len__(self):
        return self.nodes

    def add_node(self, node):
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1, node2, weight):
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
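
# Usage sketch: the node labels and weights below are illustrative. Building a
# small chain graph and running the algorithm yields the distance and parent
# maps of the spanning tree rooted at the first node pushed.
# g = GraphUndirectedWeighted()
# g.add_edge(1, 2, 3)
# g.add_edge(2, 3, 10)
# g.add_edge(3, 4, 5)
# dist, parent = prims_algo(g)  # parent e.g. {1: None, 2: 1, 3: 2, 4: 3}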
| 190 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 490 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by exchanging out-of-order pairs (O(n^2))."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
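
# Worked example (doctest style):
# >>> exchange_sort([5, 2, 4, 1])
# [1, 2, 4, 5]
# Each pass pins the minimum of the remaining suffix into position i, like
# selection sort with eager swaps.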
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
| 513 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 721 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 615 | 0 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 103 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string, e.g. 216 -> "0o330"."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
'''simple docstring'''
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
| 130 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
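
# Quick check (a sketch): for the straight line f(x) = 2x on [0, 2] the exact
# arc length is sqrt(1 + 2**2) * 2 = 2 * sqrt(5) ~= 4.4721, and the polyline
# approximation matches for any step count since the curve is already linear.
# assert abs(line_length(lambda x: 2 * x, 0, 2, 10) - 2 * math.sqrt(5)) < 1e-9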
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
while i <= 100_000:
print(F"With {i} steps: {line_length(f, -10, 10, i)}")
i *= 10
| 704 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 212 | 0 |
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
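
# Equivalent compact form (a sketch): the four scan directions above can be
# folded into delta vectors. This helper is an illustration, not part of the
# original solution.
def _largest_product_with_deltas(grid: list[list[int]]) -> int:
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(20):
            for j in range(20):
                if 0 <= i + 3 * di < 20 and 0 <= j + 3 * dj < 20:
                    product = 1
                    for k in range(4):
                        product *= grid[i + k * di][j + k * dj]
                    best = max(best, product)
    return best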
if __name__ == "__main__":
print(solution())
| 191 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 191 | 1 |
class EditDistance:
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
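
# Worked example: the classic pair "kitten" -> "sitting" needs 3 edits
# (substitute k->s, substitute e->i, append g), so both methods return 3:
# EditDistance().min_dist_bottom_up("kitten", "sitting") == 3
# EditDistance().min_dist_top_down("kitten", "sitting") == 3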
if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 716 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return vector . vector, the squared Euclidean norm."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier trained via its Wolfe dual."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #   and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
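
# Usage sketch (hypothetical data): two linearly separable clusters. All names
# and values below are illustrative, not part of the original module.
# xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#       np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
# ys = np.asarray([1, 1, -1, -1])
# svc = SVC(kernel="linear")
# svc.fit(xs, ys)
# svc.predict(np.asarray([0.0, 1.5]))  # expected: 1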
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 264 | 0 |
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # recompute the maximum over the range this tree node covers
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
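
# Usage sketch: point updates with max queries over the half-open range
# [left, right). Values below are illustrative.
# tree = MaxFenwickTree(5)
# tree.update(2, 7)
# tree.update(4, 3)
# tree.query(0, 5)  # expected: 7, the maximum over the whole array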
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 107 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
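
# Usage sketch for chunk_layer: run a layer over a large flattened batch in
# slices of `chunk_size` to bound peak memory. The toy layer and shapes are
# illustrative; the layer must accept its inputs as keyword arguments.
# inputs = {"x": torch.randn(16, 8)}
# out = chunk_layer(lambda x: x * 2, inputs, chunk_size=4, no_batch_dims=1)
# out.shape  # torch.Size([16, 8])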
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
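
# A short usage sketch of the pipeline exercised above, outside of unittest. The
# checkpoint id comes from the integration test; the rest is standard diffusers API.
# Left as comments because running it downloads model weights.
#
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(num_inference_steps=20).images[0]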
| 695 | 1 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 396 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(F"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')
    print(F"""Save vocab file to {pytorch_vocab_dump_path}""")
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
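
# Example invocation (the paths are hypothetical -- substitute your own checkpoint
# and output directory; the file name assumes this script keeps its upstream name):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./xlm_mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./converted_xlm
#
# The script writes pytorch_model.bin (WEIGHTS_NAME), config.json (CONFIG_NAME) and
# the vocabulary file into the dump folder.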
| 396 | 1 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX algorithm for minimum vertex cover."""
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
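
    # A brute-force cross-check for small graphs: enumerate vertex subsets by
    # increasing size and return the first one covering every edge. Exponential, so
    # it is only a sanity check for the greedy approximation above, not part of the
    # original algorithm.
    import copy
    from itertools import combinations

    def min_vertex_cover_brute_force(g: dict) -> set:
        edges = {frozenset((u, v)) for u, neighbors in g.items() for v in neighbors}
        for size in range(len(g) + 1):
            for subset in combinations(g, size):
                if all(edge & set(subset) for edge in edges):
                    return set(subset)
        return set(g)

    sample = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    exact = min_vertex_cover_brute_force(sample)
    # greedy_min_vertex_cover mutates its input, so give it a copy; the greedy
    # cover may be larger than the optimum, but never smaller.
    greedy = greedy_min_vertex_cover(copy.deepcopy(sample))
    assert len(greedy) >= len(exact)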
| 706 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the DFS root is an articulation point only if it has >1 outgoing edge
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
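
# A naive cross-check, assuming an undirected input graph: a vertex is an
# articulation point iff removing it disconnects the remaining vertices.
# O(V * (V + E)), which is fine for the tiny demo graph above; not part of the
# original algorithm.
def naive_articulation_points(graph: dict) -> set:
    def reachable(start, banned):
        seen = {start}
        stack = [start]
        while stack:
            node = stack.pop()
            for nxt in graph[node]:
                if nxt != banned and nxt not in seen:
                    seen.add(nxt)
                    stack.append(nxt)
        return seen

    points = set()
    for v in graph:
        others = [u for u in graph if u != v]
        if others and reachable(others[0], v) != set(others):
            points.add(v)
    return points


print(naive_articulation_points(data))  # expected: {2, 3, 5} for the demo graph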
| 422 | 0 |
from __future__ import annotations

from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    """
    >>> QueueByTwoStacks([1, 2, 3])
    Queue((1, 2, 3))
    """

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Bind methods locally to reduce attribute look-ups in the while loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError('Queue is empty')
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
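
    # A quick demonstration: FIFO order is preserved even though elements only
    # ever move between the two internal stacks, and each element moves at most
    # once, which is what makes get() amortized O(1).
    q = QueueByTwoStacks([1, 2])
    q.put(3)
    assert len(q) == 3
    assert [q.get(), q.get(), q.get()] == [1, 2, 3]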
| 484 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Dict[str, Any] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, return_commit_hash=True, **kwargs, )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
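
# A usage sketch for SchedulerMixin.from_pretrained / save_pretrained. Any concrete
# scheduler subclass can be loaded from a model repo's "scheduler" subfolder; the
# scheduler class and repo id below are illustrative, not part of this file.
#
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
#   scheduler.save_pretrained("./my_scheduler")
#   print(scheduler.compatibles)  # other scheduler classes that share this config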
| 484 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_nezha"] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
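
# _LazyModule defers the heavy torch imports above until an attribute is actually
# accessed. A minimal sketch of the same mechanism using module-level __getattr__
# (PEP 562); this illustrates the idea and is not transformers' implementation.
#
#   import importlib
#
#   def __getattr__(name):
#       for module_name, names in _import_structure.items():
#           if name in names:
#               module = importlib.import_module(f".{module_name}", __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")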
| 705 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def A__ ( UpperCamelCase ):
A = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
A = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[:256, :]
A = in_proj_bias[:256]
A = in_proj_weight[256:512, :]
A = in_proj_bias[256:512]
A = in_proj_weight[-256:, :]
A = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
A = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
A = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[:256, :]
A = in_proj_bias[:256]
A = in_proj_weight[256:512, :]
A = in_proj_bias[256:512]
A = in_proj_weight[-256:, :]
A = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
A = state_dict.pop(
F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
A = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
A = in_proj_weight_cross_attn[:256, :]
A = in_proj_bias_cross_attn[:256]
A = in_proj_weight_cross_attn[256:512, :]
A = in_proj_bias_cross_attn[256:512]
A = in_proj_weight_cross_attn[-256:, :]
A = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
logger.info("Converting model..." )
# load original state dict
A = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A = rename_backbone_keys(UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
A = state_dict.pop(UpperCamelCase )
A = val
# create HuggingFace model and load state dict
A = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
A = 15
A = 2
A = {0: "table", 1: "table rotated"}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 125
A = 6
A = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 )
A = TableTransformerForObjectDetection(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
# verify our conversion
A = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
A = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=UpperCamelCase )
A = Image.open(UpperCamelCase ).convert("RGB" )
A = normalize(resize(UpperCamelCase , UpperCamelCase ) ).unsqueeze(0 )
A = model(UpperCamelCase )
if "detection" in checkpoint_url:
A = (1, 15, 3)
A = torch.tensor(
[[-6.78_97, -16.99_85, 6.79_37], [-8.01_86, -22.21_92, 6.96_77], [-7.31_17, -21.07_08, 7.40_55]] )
A = torch.tensor([[0.48_67, 0.17_67, 0.67_32], [0.67_18, 0.44_79, 0.38_30], [0.47_16, 0.17_60, 0.63_64]] )
else:
A = (1, 125, 7)
A = torch.tensor(
[[-18.14_30, -8.32_14, 4.82_74], [-18.46_85, -7.13_61, -4.26_67], [-26.36_93, -9.34_29, -4.99_62]] )
A = torch.tensor([[0.49_83, 0.55_95, 0.94_40], [0.49_16, 0.63_15, 0.59_54], [0.61_08, 0.86_37, 0.11_35]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
image_processor.save_pretrained(UpperCamelCase )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
A = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(UpperCamelCase )
image_processor.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
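
# Once converted (or using the checkpoints already pushed to the hub), the model can
# be used like any other transformers object-detection model; the repo id matches the
# push_to_hub name used above, and `image` stands for any PIL table image.
#
#   from transformers import AutoImageProcessor, TableTransformerForObjectDetection
#   processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
#   model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")
#   inputs = processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)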
| 524 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1E-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
SCREAMING_SNAKE_CASE__ = """google/pegasus-xsum"""
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
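
# The same checkpoint is easiest to exercise through the high-level pipeline API;
# this mirrors what the integration test above does by hand (tokenize, generate with
# beam search, batch-decode). Left as comments because it downloads model weights.
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization", model="google/pegasus-xsum", framework="tf")
#   print(summarizer("PG&E stated it scheduled the blackouts ...")[0]["summary_text"])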
| 513 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 513 | 1 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__snake_case = logging.get_logger(__name__)
def rename_key(key: str) -> str:
    regex = R'\w+[.]\d+'
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '_'.join(pat.split('.')))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('.'))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''')
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 280 |
'''simple docstring'''
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Calculate the product of all the digits in a string of digits."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in n with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 280 | 1 |
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    """Count the set bits of a non-negative integer using Brian Kernighan's way."""
    if not isinstance(number, int) or number < 0:
        raise ValueError('Input must be a non-negative integer')
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
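
    # Cross-check against the standard library; on Python >= 3.10,
    # int.bit_count() is the idiomatic one-liner for the same thing.
    for n in (0, 1, 25, 37, 21, 58):
        assert get_set_bits_count(n) == bin(n).count("1")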
| 350 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"""{round(-1 * my_fir_sum):.1f}""")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"""{round(-1 * my_sec_sum):.1f}""")

    # print the difference between them
    print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}""")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text input into two dicts of counts: frequencies of single
    characters and frequencies of two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
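
    # Example run: prints the first-order entropy, the second-order entropy, and
    # their difference for a short lowercase string (rounded inside calculate_prob).
    calculate_prob("the quick brown fox jumps over the lazy dog ")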
| 354 | 0 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
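
def _sanity_check() -> None:
    # A quick self-test on a known semiprime (8051 = 83 * 97), a classic worked
    # example for Pollard's rho; not part of the original module. Call it
    # manually when experimenting with the function.
    divisor = pollard_rho(8051)
    assert divisor is not None and divisor not in (1, 8051) and 8051 % divisor == 0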
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'{args.num} is probably prime')
else:
        quotient = args.num // divisor
        print(f'{args.num} = {divisor} * {quotient}')
| 403 |
def solution(max_perimeter: int = 10**9) -> int:
    """
    Returns the sum of the perimeters of all almost equilateral Heronian triangles
    (isosceles triangles with sides a, a, a +/- 1 and integral area) whose
    perimeter does not exceed max_perimeter.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
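
# A slow brute-force cross-check, assuming the Project Euler 94 statement: sum the
# perimeters of "almost equilateral" triangles (sides a, a, a +/- 1) with integral
# area. It applies Heron's formula directly, so it is only practical for small limits.
def _brute_force(max_perimeter: int) -> int:
    from math import isqrt

    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for b in (a - 1, a + 1):
            perimeter = 2 * a + b
            if b <= 0 or perimeter > max_perimeter:
                continue
            # 16 * area^2 = b^2 * (4a^2 - b^2), so the area is integral exactly
            # when this is a positive perfect square whose root is divisible by 4.
            sq = b * b * (4 * a * a - b * b)
            root = isqrt(sq)
            if root > 0 and root * root == sq and root % 4 == 0:
                total += perimeter
    return total


# Manual check: solution(1_000) == _brute_force(1_000) == 984.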
if __name__ == "__main__":
    print(f'{solution() = }')
| 403 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_UpperCamelCase = logging.get_logger(__name__)
# General docstring
_UpperCamelCase = """RegNetConfig"""
# Base docstring
_UpperCamelCase = """facebook/regnet-y-040"""
_UpperCamelCase = [1, 1088, 7, 7]
# Image classification docstring
_UpperCamelCase = """facebook/regnet-y-040"""
_UpperCamelCase = """tabby, tabby cat"""
_UpperCamelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase = 3 , lowerCamelCase = 1 , lowerCamelCase = 1 , lowerCamelCase = "relu" , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__A : str = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__A : List[str] = tf.keras.layers.ConvaD(
filters=lowerCamelCase , kernel_size=lowerCamelCase , strides=lowerCamelCase , padding="VALID" , groups=lowerCamelCase , use_bias=lowerCamelCase , name="convolution" , )
__A : Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
__A : Tuple = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Optional[Any] = self.convolution(self.padding(lowerCamelCase ) )
__A : Union[str, Any] = self.normalization(lowerCamelCase )
__A : Any = self.activation(lowerCamelCase )
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__A : Optional[Any] = config.num_channels
__A : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Optional[int] = shape_list(lowerCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__A : Optional[Any] = tf.transpose(lowerCamelCase , perm=(0, 2, 3, 1) )
__A : int = self.embedder(lowerCamelCase )
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase = 2 , **lowerCamelCase ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__A : Dict = tf.keras.layers.ConvaD(
filters=lowerCamelCase , kernel_size=1 , strides=lowerCamelCase , use_bias=lowerCamelCase , name="convolution" )
__A : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
return self.normalization(self.convolution(lowerCamelCase ) , training=lowerCamelCase )
class __magic_name__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__A : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase , name="pooler" )
__A : List[str] = [
tf.keras.layers.ConvaD(filters=lowerCamelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=lowerCamelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Optional[Any] = self.pooler(lowerCamelCase )
for layer_module in self.attention:
__A : List[Any] = layer_module(lowerCamelCase )
__A : int = hidden_state * pooled
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 , **lowerCamelCase ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__A : Optional[int] = in_channels != out_channels or stride != 1
__A : List[str] = max(1 , out_channels // config.groups_width )
__A : int = (
TFRegNetShortCut(lowerCamelCase , stride=lowerCamelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__A : Optional[Any] = [
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase , stride=lowerCamelCase , groups=lowerCamelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=lowerCamelCase , name="layer.2" ),
]
__A : str = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Optional[Any] = hidden_state
for layer_module in self.layers:
__A : Tuple = layer_module(lowerCamelCase )
__A : Union[str, Any] = self.shortcut(lowerCamelCase )
hidden_state += residual
__A : Tuple = self.activation(lowerCamelCase )
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 , **lowerCamelCase ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__A : Union[str, Any] = in_channels != out_channels or stride != 1
__A : str = max(1 , out_channels // config.groups_width )
__A : Tuple = (
TFRegNetShortCut(lowerCamelCase , stride=lowerCamelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
__A : Tuple = [
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase , stride=lowerCamelCase , groups=lowerCamelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowerCamelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=lowerCamelCase , name="layer.3" ),
]
__A : str = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Tuple = hidden_state
for layer_module in self.layers:
__A : Optional[int] = layer_module(lowerCamelCase )
__A : int = self.shortcut(lowerCamelCase )
hidden_state += residual
__A : str = self.activation(lowerCamelCase )
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 2 , lowerCamelCase = 2 , **lowerCamelCase ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__A : Dict = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__A : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase , lowerCamelCase , lowerCamelCase , stride=lowerCamelCase , name="layers.0" ),
*[layer(lowerCamelCase , lowerCamelCase , lowerCamelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
for layer_module in self.layers:
__A : Optional[int] = layer_module(lowerCamelCase )
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__A : List[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
__A : List[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase , lowerCamelCase , lowerCamelCase , depth=lowerCamelCase , name=f"stages.{i+1}" ) )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True ):
'''simple docstring'''
__A : Optional[int] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__A : Any = hidden_states + (hidden_state,)
__A : List[Any] = stage_module(lowerCamelCase )
if output_hidden_states:
__A : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase , hidden_states=lowerCamelCase )
@keras_serializable
class __magic_name__ ( tf.keras.layers.Layer ):
"""simple docstring"""
lowerCamelCase__ = RegNetConfig
def __init__( self , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__A : List[str] = config
__A : List[Any] = TFRegNetEmbeddings(lowerCamelCase , name="embedder" )
__A : List[Any] = TFRegNetEncoder(lowerCamelCase , name="encoder" )
__A : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase , name="pooler" )
@unpack_inputs
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , ):
'''simple docstring'''
__A : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A : str = return_dict if return_dict is not None else self.config.use_return_dict
__A : Tuple = self.embedder(lowerCamelCase , training=lowerCamelCase )
__A : Optional[int] = self.encoder(
lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase )
__A : Optional[int] = encoder_outputs[0]
__A : Tuple = self.pooler(lowerCamelCase )
        # Change to NCHW output format to have uniformity in the modules
__A : Optional[Any] = tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) )
__A : Optional[int] = tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__A : int = tuple([tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase , pooler_output=lowerCamelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __magic_name__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase__ = RegNetConfig
lowerCamelCase__ = 'regnet'
lowerCamelCase__ = 'pixel_values'
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_UpperCamelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCamelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , lowerCAmelCase , )
class __magic_name__ ( lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
super().__init__(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
__A : int = TFRegNetMainLayer(lowerCamelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=False , ):
'''simple docstring'''
__A : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A : str = return_dict if return_dict is not None else self.config.use_return_dict
__A : Tuple = self.regnet(
pixel_values=lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , lowerCAmelCase , )
class __magic_name__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
super().__init__(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
__A : List[str] = config.num_labels
__A : List[str] = TFRegNetMainLayer(lowerCamelCase , name="regnet" )
# classification head
__A : Dict = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=False , ):
'''simple docstring'''
__A : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
__A : List[Any] = self.regnet(
lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase )
__A : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
__A : int = self.classifier[0](lowerCamelCase )
__A : Dict = self.classifier[1](lowerCamelCase )
__A : List[Any] = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase , logits=lowerCamelCase )
if not return_dict:
__A : Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase , logits=lowerCamelCase , hidden_states=outputs.hidden_states )
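# A minimal usage sketch for the classifier above. It mirrors transformers'
# TFRegNetForImageClassification; the checkpoint name is the published RegNet
# weight, and `image` is any PIL image you supply:
# from transformers import AutoImageProcessor, TFRegNetForImageClassification
# processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# inputs = processor(image, return_tensors="tf")
# logits = model(**inputs).logits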
| 111 |
'''Print a star diamond of user-chosen size (upper pyramid plus inverted pyramid).'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    '''Print the upper half: n rows of left-padded stars.'''
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    '''Print the lower half: n rows of shrinking, left-padded stars.'''
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    '''Print the full diamond, or a placeholder message when n <= 0.'''
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 111 | 1 |
'''Ford-Fulkerson maximum flow using BFS augmenting paths (Edmonds-Karp).'''
def bfs(graph, source, sink, parents):
    '''Return True if an augmenting path from source to sink exists,
    recording the path in `parents`.'''
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parents[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    '''Return the maximum flow from source to sink in the capacity matrix `graph`.'''
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
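# The capacity matrix above is the classic CLRS example network; the
# printed maximum flow is 23 (12 via 0-1-3-5, 4 via 0-2-4-5, 7 via 0-2-4-3-5).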
| 714 |
'''Basic binary tree: in-order display, depth, and full-binary-tree check.'''
from __future__ import annotations


class Node:
    def __init__(self, data: int):
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree):  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree):
    '''Number of nodes on the longest root-to-leaf path.'''
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree):
    '''True if every node has either zero or two children.'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main():  # Main function for testing.
    # Wire up a small sample tree (one plausible arrangement of the nine nodes).
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
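# With the sample wiring in main(), the program prints False (node 3 has
# only one child, so the tree is not full) and a depth of 5 (path 1-3-7-8-9).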
| 568 | 0 |
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order so the list ends up sorted ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
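# Expected output for the two test tuples above:
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10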
| 233 |
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    '''Greedy fractional knapsack: values `vl`, weights `wt`, capacity `w`,
    item count `n`. Items are taken in decreasing value/weight order; whole
    items while they fit, then a fraction of the first item that does not.'''
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
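# Worked example: frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3).
# Ratios are 6, 5, 4; the first two items fit whole (value 160) and
# 20/30 of the third item is taken: 160 + 20 * 120 / 30 = 240.0.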
| 233 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ ='''char'''
lowerCAmelCase__ ='''bpe'''
lowerCAmelCase__ ='''wp'''
lowerCamelCase__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ =['''image_processor''', '''char_tokenizer''']
lowerCAmelCase__ ='''ViTImageProcessor'''
lowerCAmelCase__ ='''MgpstrTokenizer'''
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
snake_case__ : str =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
snake_case__ : Any =kwargs.pop('''feature_extractor''' )
snake_case__ : Union[str, Any] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
snake_case__ : Dict =tokenizer
snake_case__ : Optional[Any] =AutoTokenizer.from_pretrained('''gpt2''' )
snake_case__ : int =AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case__ : str =self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is not None:
snake_case__ : int =self.char_tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ : Dict =encodings['''input_ids''']
return inputs
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] =sequences
snake_case__ : Tuple =char_preds.size(0 )
snake_case__ : str =self._decode_helper(__SCREAMING_SNAKE_CASE , '''char''' )
snake_case__ : List[str] =self._decode_helper(__SCREAMING_SNAKE_CASE , '''bpe''' )
snake_case__ : Any =self._decode_helper(__SCREAMING_SNAKE_CASE , '''wp''' )
snake_case__ : List[str] =[]
snake_case__ : List[Any] =[]
for i in range(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] =[char_scores[i], bpe_scores[i], wp_scores[i]]
snake_case__ : int =[char_strs[i], bpe_strs[i], wp_strs[i]]
snake_case__ : Optional[Any] =scores.index(max(__SCREAMING_SNAKE_CASE ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
snake_case__ : Optional[Any] ={}
snake_case__ : List[str] =final_strs
snake_case__ : Optional[int] =final_scores
snake_case__ : Optional[Any] =char_strs
snake_case__ : Dict =bpe_strs
snake_case__ : List[str] =wp_strs
return out
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
if format == DecodeType.CHARACTER:
snake_case__ : Dict =self.char_decode
snake_case__ : List[Any] =1
snake_case__ : List[str] ='''[s]'''
elif format == DecodeType.BPE:
snake_case__ : str =self.bpe_decode
snake_case__ : str =2
snake_case__ : Tuple ='''#'''
elif format == DecodeType.WORDPIECE:
snake_case__ : Optional[Any] =self.wp_decode
snake_case__ : Optional[Any] =102
snake_case__ : List[str] ='''[SEP]'''
else:
raise ValueError(f'''Format {format} is not supported.''' )
        dec_strs, conf_scores = [], []  # these names are used by the appends and the return below
snake_case__ : str =pred_logits.size(0 )
snake_case__ : Optional[Any] =pred_logits.size(1 )
snake_case__ : Optional[int] =pred_logits.topk(1 , dim=-1 , largest=__SCREAMING_SNAKE_CASE , sorted=__SCREAMING_SNAKE_CASE )
snake_case__ : int =preds_index.view(-1 , __SCREAMING_SNAKE_CASE )[:, 1:]
snake_case__ : List[Any] =decoder(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict =torch.nn.functional.softmax(__SCREAMING_SNAKE_CASE , dim=2 ).max(dim=2 )
snake_case__ : Dict =preds_max_prob[:, 1:]
for index in range(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] =preds_str[index].find(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] =preds_str[index][:pred_eos]
snake_case__ : int =preds_index[index].cpu().tolist()
snake_case__ : Tuple =pred_index.index(__SCREAMING_SNAKE_CASE ) if eos_token in pred_index else -1
snake_case__ : Any =preds_max_prob[index][: pred_eos_index + 1]
snake_case__ : Optional[int] =pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__SCREAMING_SNAKE_CASE )
conf_scores.append(__SCREAMING_SNAKE_CASE )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[str] =[seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )]
return decode_strs
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.bpe_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
snake_case__ : List[str] =[seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )]
return decode_strs
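# Usage sketch for the MGP-STR processor above (this mirrors transformers'
# documented example; `image` is any PIL image you supply):
# from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=image, return_tensors="pt").pixel_values
# outputs = model(pixel_values)
# text = processor.batch_decode(outputs.logits)["generated_text"]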
| 712 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve two linear equations a*x + b*y = c, each given as [a, b, c],
    using Cramer's rule."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (consistent system): x = y = 0
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial solution (consistent system)
            return (x, y)
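# Worked example: solve x + y = 3 and x - y = 1.
# cramers_rule_2x2([1, 1, 3], [1, -1, 1]) returns (2.0, 1.0).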
| 408 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class SCREAMING_SNAKE_CASE ( a ):
"""simple docstring"""
a_ : Tuple ="donut-swin"
a_ : int ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[Any] , _snake_case : Dict=224 , _snake_case : Any=4 , _snake_case : Union[str, Any]=3 , _snake_case : List[str]=96 , _snake_case : Union[str, Any]=[2, 2, 6, 2] , _snake_case : int=[3, 6, 12, 24] , _snake_case : List[str]=7 , _snake_case : List[str]=4.0 , _snake_case : Union[str, Any]=True , _snake_case : Union[str, Any]=0.0 , _snake_case : List[Any]=0.0 , _snake_case : int=0.1 , _snake_case : List[Any]="gelu" , _snake_case : str=False , _snake_case : int=0.02 , _snake_case : Dict=1E-5 , **_snake_case : Any , ) -> List[str]:
'''simple docstring'''
super().__init__(**_snake_case )
a__ = image_size
a__ = patch_size
a__ = num_channels
a__ = embed_dim
a__ = depths
a__ = len(_snake_case )
a__ = num_heads
a__ = window_size
a__ = mlp_ratio
a__ = qkv_bias
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = drop_path_rate
a__ = hidden_act
a__ = use_absolute_embeddings
a__ = layer_norm_eps
a__ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a__ = int(embed_dim * 2 ** (len(_snake_case ) - 1) )
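# Minimal usage sketch (the class above is exported as DonutSwinConfig in transformers):
# from transformers import DonutSwinConfig, DonutSwinModel
# config = DonutSwinConfig(image_size=224, embed_dim=96)  # defaults shown above
# model = DonutSwinModel(config)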
| 232 | """simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> int:
'''simple docstring'''
for attribute in key.split('.' ):
a__ = getattr(UpperCAmelCase__,UpperCAmelCase__ )
if weight_type is not None:
a__ = getattr(UpperCAmelCase__,UpperCAmelCase__ ).shape
else:
a__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a__ = value
elif weight_type == "weight_g":
a__ = value
elif weight_type == "weight_v":
a__ = value
elif weight_type == "bias":
a__ = value
else:
a__ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> int:
'''simple docstring'''
a__ = []
a__ = fairseq_model.state_dict()
a__ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
a__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,hf_model.config.feat_extract_norm == 'group',)
a__ = True
else:
for key, mapped_key in MAPPING.items():
a__ = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
a__ = True
if "*" in mapped_key:
a__ = name.split(UpperCAmelCase__ )[0].split('.' )[-2]
a__ = mapped_key.replace('*',UpperCAmelCase__ )
if "weight_g" in name:
a__ = 'weight_g'
elif "weight_v" in name:
a__ = 'weight_v'
elif "weight" in name:
a__ = 'weight'
elif "bias" in name:
a__ = 'bias'
else:
a__ = None
set_recursively(UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> str:
'''simple docstring'''
a__ = full_name.split('conv_layers.' )[-1]
a__ = name.split('.' )
a__ = int(items[0] )
a__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase__ )
@torch.no_grad()
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__=None,UpperCAmelCase__=None,UpperCAmelCase__=True ) -> int:
'''simple docstring'''
if config_path is not None:
a__ = HubertConfig.from_pretrained(UpperCAmelCase__ )
else:
a__ = HubertConfig()
if is_finetuned:
if dict_path:
a__ = Dictionary.load(UpperCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ = target_dict.pad_index
a__ = target_dict.bos_index
a__ = target_dict.eos_index
a__ = len(target_dict.symbols )
a__ = os.path.join(UpperCAmelCase__,'vocab.json' )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase__ ) )
return
os.makedirs(UpperCAmelCase__,exist_ok=UpperCAmelCase__ )
with open(UpperCAmelCase__,'w',encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices,UpperCAmelCase__ )
a__ = WavaVecaCTCTokenizer(
UpperCAmelCase__,unk_token=target_dict.unk_word,pad_token=target_dict.pad_word,bos_token=target_dict.bos_word,eos_token=target_dict.eos_word,word_delimiter_token='|',do_lower_case=UpperCAmelCase__,)
a__ = True if config.feat_extract_norm == 'layer' else False
a__ = WavaVecaFeatureExtractor(
            feature_size=1,sampling_rate=16_000,padding_value=0,do_normalize=UpperCAmelCase__,return_attention_mask=UpperCAmelCase__,)
a__ = WavaVecaProcessor(feature_extractor=UpperCAmelCase__,tokenizer=UpperCAmelCase__ )
processor.save_pretrained(UpperCAmelCase__ )
a__ = HubertForCTC(UpperCAmelCase__ )
else:
a__ = HubertModel(UpperCAmelCase__ )
if is_finetuned:
a__ , a__ , a__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path],arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
a__ , a__ , a__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
a__ = model[0].eval()
recursively_load_weights(UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ )
hf_wavavec.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__magic_name__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
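# Example invocation (paths illustrative; this corresponds to transformers'
# Hubert fairseq-conversion utility, so adjust the script name to the actual file):
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned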
| 232 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
A__= 1
@register_to_config
def __init__( self : Optional[Any] , _lowercase : int = 10_00 , _lowercase : Optional[Union[np.ndarray, List[float]]] = None ):
"""simple docstring"""
self.set_timesteps(_lowercase )
# standard deviation of the initial noise distribution
UpperCAmelCase__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
UpperCAmelCase__ = 4
# running values
UpperCAmelCase__ = []
def _UpperCAmelCase ( self : List[Any] , _lowercase : int , _lowercase : Union[str, torch.device] = None ):
"""simple docstring"""
UpperCAmelCase__ = num_inference_steps
UpperCAmelCase__ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
UpperCAmelCase__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
UpperCAmelCase__ = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
UpperCAmelCase__ = torch.sin(steps * math.pi / 2 ) ** 2
UpperCAmelCase__ = (1.0 - self.betas**2) ** 0.5
UpperCAmelCase__ = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
UpperCAmelCase__ = timesteps.to(_lowercase )
UpperCAmelCase__ = []
def _UpperCAmelCase ( self : str , _lowercase : torch.FloatTensor , _lowercase : int , _lowercase : torch.FloatTensor , _lowercase : bool = True , ):
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
UpperCAmelCase__ = (self.timesteps == timestep).nonzero().item()
UpperCAmelCase__ = timestep_index + 1
UpperCAmelCase__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(_lowercase )
if len(self.ets ) == 1:
UpperCAmelCase__ = self.ets[-1]
elif len(self.ets ) == 2:
UpperCAmelCase__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
UpperCAmelCase__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
UpperCAmelCase__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
UpperCAmelCase__ = self._get_prev_sample(_lowercase , _lowercase , _lowercase , _lowercase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowercase )
def _UpperCAmelCase ( self : Dict , _lowercase : torch.FloatTensor , *_lowercase : List[str] , **_lowercase : Optional[Any] ):
"""simple docstring"""
return sample
def _UpperCAmelCase ( self : Optional[int] , _lowercase : List[Any] , _lowercase : str , _lowercase : Optional[int] , _lowercase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.alphas[timestep_index]
UpperCAmelCase__ = self.betas[timestep_index]
UpperCAmelCase__ = self.alphas[prev_timestep_index]
UpperCAmelCase__ = self.betas[prev_timestep_index]
UpperCAmelCase__ = (sample - sigma * ets) / max(_lowercase , 1E-8 )
UpperCAmelCase__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : List[str] ):
"""simple docstring"""
return self.config.num_train_timesteps
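# Note: the `ets` blends in the step logic above are the standard
# Adams-Bashforth linear-multistep coefficients of orders 1-4, e.g. the
# 4th-order update (55*e_n - 59*e_{n-1} + 37*e_{n-2} - 9*e_{n-3}) / 24.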
| 277 |
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.auto.tqdm` that, by default, renders only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar on every process except the local main one
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
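# Usage sketch (mirrors accelerate's main-process-only tqdm wrapper; the
# import path may vary slightly by accelerate version):
# from accelerate.utils import tqdm
# for batch in tqdm(dataloader):  # renders only on the main process
#     ...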
| 277 | 1 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __lowercase ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(_a ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def __lowercase ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def __lowercase ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(_a ):
http_head('''https://huggingface.co''' )
| 123 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : List[str] = KandinskyInpaintPipeline
_lowerCAmelCase : Tuple = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_lowerCAmelCase : Optional[int] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_lowerCAmelCase : Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowerCAmelCase : List[str] = False
@property
def _snake_case ( self : Optional[Any] ):
return 32
@property
def _snake_case ( self : List[Any] ):
return 32
@property
def _snake_case ( self : List[Any] ):
return self.time_input_dim
@property
def _snake_case ( self : Any ):
return self.time_input_dim * 4
@property
def _snake_case ( self : Any ):
return 100
@property
def _snake_case ( self : str ):
snake_case_ : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def _snake_case ( self : Dict ):
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
snake_case_ : Tuple = MultilingualCLIP(lowercase_ )
snake_case_ : List[Any] = text_encoder.eval()
return text_encoder
@property
def _snake_case ( self : int ):
torch.manual_seed(0 )
snake_case_ : Dict = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
snake_case_ : Union[str, Any] = UNetaDConditionModel(**lowercase_ )
return model
@property
def _snake_case ( self : int ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _snake_case ( self : Optional[int] ):
torch.manual_seed(0 )
snake_case_ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def _snake_case ( self : Optional[int] ):
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Optional[int] = self.dummy_tokenizer
snake_case_ : Any = self.dummy_unet
snake_case_ : Tuple = self.dummy_movq
snake_case_ : List[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowercase_ , )
snake_case_ : int = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _snake_case ( self : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=0 ):
snake_case_ : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
snake_case_ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase_ )
# create init_image
snake_case_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
snake_case_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Any = Image.fromarray(np.uinta(lowercase_ ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
snake_case_ : Tuple = np.ones((64, 64) , dtype=np.floataa )
snake_case_ : Dict = 0
if str(lowercase_ ).startswith('''mps''' ):
snake_case_ : List[Any] = torch.manual_seed(lowercase_ )
else:
snake_case_ : List[str] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
snake_case_ : List[Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def _snake_case ( self : str ):
snake_case_ : Any = '''cpu'''
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Tuple = self.pipeline_class(**lowercase_ )
snake_case_ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : str = pipe(**self.get_dummy_inputs(lowercase_ ) )
snake_case_ : int = output.images
snake_case_ : Tuple = pipe(
**self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
snake_case_ : Optional[int] = image[0, -3:, -3:, -1]
snake_case_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
snake_case_ : str = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def _snake_case ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Dict ):
snake_case_ : str = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
snake_case_ : List[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
snake_case_ : int = np.ones((768, 768) , dtype=np.floataa )
snake_case_ : str = 0
snake_case_ : Tuple = '''a hat'''
snake_case_ : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase_ )
snake_case_ : List[str] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
snake_case_ : Optional[Any] = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case_, snake_case_ : Tuple = pipe_prior(
lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
snake_case_ : Any = pipeline(
lowercase_ , image=lowercase_ , mask_image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
snake_case_ : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
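# To run this module directly with pytest (path illustrative; use the file's
# actual location in the diffusers test tree):
#   pytest path/to/test_kandinsky_inpaint.py -k "inpaint"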
| 123 | 1 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    '''Convert a snake_case string to camelCase (or PascalCase when use_pascal is True).'''
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
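# Examples:
#   snake_to_camel_case("some_random_string")                  -> "someRandomString"
#   snake_to_camel_case("some_random_string", use_pascal=True) -> "SomeRandomString"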
| 712 |
def factorial(num: int) -> int:
    '''Return num! computed iteratively.'''
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    '''Return the sum of the decimal digits of `number`.'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    '''Project Euler 20: sum of the digits of num!.'''
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
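# For reference, solution(100) is 648 (the answer to Project Euler problem 20).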
| 177 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def snake_case_ ( A_ : List[Any], A_ : int, A_ : List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = UniSpeechSatForSequenceClassification.from_pretrained(A_, config=A_ )
_lowerCamelCase : List[str] = downstream_dict['''projector.weight''']
_lowerCamelCase : Union[str, Any] = downstream_dict['''projector.bias''']
_lowerCamelCase : Union[str, Any] = downstream_dict['''model.post_net.linear.weight''']
_lowerCamelCase : int = downstream_dict['''model.post_net.linear.bias''']
return model
def snake_case_ ( A_ : List[str], A_ : Optional[Any], A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : int = UniSpeechSatForAudioFrameClassification.from_pretrained(A_, config=A_ )
_lowerCamelCase : Dict = downstream_dict['''model.linear.weight''']
_lowerCamelCase : int = downstream_dict['''model.linear.bias''']
return model
def snake_case_ ( A_ : int, A_ : str, A_ : Tuple ):
'''simple docstring'''
_lowerCamelCase : List[Any] = UniSpeechSatForXVector.from_pretrained(A_, config=A_ )
_lowerCamelCase : Tuple = downstream_dict['''connector.weight''']
_lowerCamelCase : Any = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_lowerCamelCase : Dict = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_lowerCamelCase : int = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_lowerCamelCase : str = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
_lowerCamelCase : Optional[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
_lowerCamelCase : List[str] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
_lowerCamelCase : int = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
_lowerCamelCase : Dict = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def snake_case_ ( A_ : Union[str, Any], A_ : Optional[int], A_ : Union[str, Any], A_ : str ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = torch.load(A_, map_location='''cpu''' )
_lowerCamelCase : str = checkpoint['''Downstream''']
_lowerCamelCase : Any = UniSpeechSatConfig.from_pretrained(A_ )
_lowerCamelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(
A_, return_attention_mask=A_, do_normalize=A_ )
_lowerCamelCase : Tuple = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
_lowerCamelCase : Union[str, Any] = convert_classification(A_, A_, A_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
_lowerCamelCase : int = convert_diarization(A_, A_, A_ )
elif arch.endswith('''ForXVector''' ):
_lowerCamelCase : Tuple = convert_xvector(A_, A_, A_ )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_lowerCamelCase : Tuple = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(A_ )
hf_model.save_pretrained(A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
lowerCAmelCase__ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
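# Example invocation (all names and paths illustrative):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted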
| 83 |
"""simple docstring"""
import argparse
import json
import subprocess
def UpperCamelCase ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Dict = (
f'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_UpperCAmelCase : List[str] = subprocess.run(_lowerCAmelCase, shell=_lowerCAmelCase, stdout=subprocess.PIPE )
_UpperCAmelCase : List[str] = output.stdout.decode("""utf-8""" )
_UpperCAmelCase : Optional[int] = json.loads(_lowerCAmelCase )
_UpperCAmelCase : Dict = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCAmelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""", """w""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) > 0:
_UpperCAmelCase : List[str] = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def UpperCamelCase ( _lowerCAmelCase : List[Any] ) -> Any:
return values.split(""",""" )
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
lowerCamelCase__ : Dict = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 238 | 0 |
'''Circular FIFO queue backed by a fixed-size array.'''
class CircularQueue:
    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        '''Return the front element, or False if the queue is empty.'''
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
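# Usage sketch:
#   q = CircularQueue(3)
#   q.enqueue(1).enqueue(2)
#   q.dequeue()  # -> 1
#   len(q)       # -> 1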
| 40 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = ["image_processor", "tokenizer"]
lowerCAmelCase : List[Any] = "BlipImageProcessor"
lowerCAmelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
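# Usage sketch: the processor above mirrors transformers' Blip2Processor
# (checkpoint name illustrative; `image` is any PIL image you supply):
# from transformers import Blip2Processor
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# inputs = processor(images=image, text="a photo of", return_tensors="pt")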
| 40 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
    '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class a ( a__ ):
snake_case__ = '''dpt'''
def __init__( self , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=3_84 , _snake_case=16 , _snake_case=3 , _snake_case=False , _snake_case=True , _snake_case=[2, 5, 8, 11] , _snake_case="project" , _snake_case=[4, 2, 1, 0.5] , _snake_case=[96, 1_92, 3_84, 7_68] , _snake_case=2_56 , _snake_case=-1 , _snake_case=False , _snake_case=True , _snake_case=0.4 , _snake_case=2_55 , _snake_case=0.1 , _snake_case=[1, 10_24, 24, 24] , _snake_case=[0, 1] , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = hidden_size
lowerCAmelCase = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
lowerCAmelCase = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
lowerCAmelCase = BitConfig(**_snake_case )
elif isinstance(_snake_case , _snake_case ):
logger.info('Initializing the config with a `BiT` backbone.' )
lowerCAmelCase = BitConfig(**_snake_case )
elif isinstance(_snake_case , _snake_case ):
lowerCAmelCase = backbone_config
else:
raise ValueError(
F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
lowerCAmelCase = backbone_featmap_shape
lowerCAmelCase = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = []
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = qkv_bias
lowerCAmelCase = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
lowerCAmelCase = readout_type
lowerCAmelCase = reassemble_factors
lowerCAmelCase = neck_hidden_sizes
lowerCAmelCase = fusion_hidden_size
lowerCAmelCase = head_in_index
lowerCAmelCase = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase = use_auxiliary_head
lowerCAmelCase = auxiliary_loss_weight
lowerCAmelCase = semantic_loss_ignore_index
lowerCAmelCase = semantic_classifier_dropout
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase = self.backbone_config.to_dict()
lowerCAmelCase = self.__class__.model_type
return output
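# Illustrative usage (a sketch, not from the original file). It assumes the
# class above corresponds to transformers' DPTConfig -- the "dpt" model_type
# and the BitConfig hybrid-backbone logic suggest as much:
if __name__ == "__main__":
    from transformers import DPTConfig, DPTModel

    configuration = DPTConfig()        # ViT-style defaults: hidden_size=768, 12 layers
    model = DPTModel(configuration)    # randomly initialized model
    print(configuration.readout_type)  # "project"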
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
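# The same report can also be produced programmatically (an illustrative
# sketch; the config path is a placeholder):
#
#     from accelerate.commands.env import env_command_parser, env_command
#
#     args = env_command_parser().parse_args(["--config_file", "my_config.yaml"])
#     info = env_command(args)  # prints the report and returns it as a dict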
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
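# A conventional runner so the tests above can also be executed directly with
# `python test_check_dummies.py` (an addition; the original file relies on an
# external runner such as pytest):
if __name__ == "__main__":
    unittest.main()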
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode the given word into Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian cipher string back into plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
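# A quick round-trip demonstration (illustrative; the expected string follows
# directly from the tables above -- each letter maps to one 5-symbol group):
if __name__ == "__main__":
    assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
    assert decode(encode("flee at once")) == "flee at once"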
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__a: Union[str, Any] = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
def __init__( self : Any , lowerCamelCase : GlueDataTrainingArguments , lowerCamelCase : PreTrainedTokenizerBase , lowerCamelCase : Optional[int] = None , lowerCamelCase : Union[str, Split] = Split.train , lowerCamelCase : Optional[str] = None , ) -> int:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , lowerCamelCase , )
_UpperCAmelCase = args
_UpperCAmelCase = glue_processors[args.task_name]()
_UpperCAmelCase = glue_output_modes[args.task_name]
if isinstance(lowerCamelCase , lowerCamelCase ):
try:
_UpperCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
_UpperCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_UpperCAmelCase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_UpperCAmelCase , _UpperCAmelCase = label_list[2], label_list[1]
_UpperCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCAmelCase = cached_features_file + """.lock"""
with FileLock(lowerCamelCase ):
if os.path.exists(lowerCamelCase ) and not args.overwrite_cache:
_UpperCAmelCase = time.time()
_UpperCAmelCase = torch.load(lowerCamelCase )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_UpperCAmelCase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_UpperCAmelCase = self.processor.get_test_examples(args.data_dir )
else:
_UpperCAmelCase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_UpperCAmelCase = examples[:limit_length]
_UpperCAmelCase = glue_convert_examples_to_features(
lowerCamelCase , lowerCamelCase , max_length=args.max_seq_length , label_list=lowerCamelCase , output_mode=self.output_mode , )
_UpperCAmelCase = time.time()
torch.save(self.features , lowerCamelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return len(self.features )
def __getitem__( self : str , lowerCamelCase : Any ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
    def get_labels(self) -> List[str]:
        return self.label_list
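# Illustrative construction (a sketch; it assumes the classes above are the
# deprecated GlueDataset helpers from transformers, and that the GLUE MRPC
# data has already been downloaded to the placeholder path below):
if __name__ == "__main__":
    from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
    dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
    print(len(dataset), dataset.get_labels())  # number of dev examples, ["0", "1"]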
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A list wrapper so that stacks can be ordered by their top element."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
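# A small self-check (illustrative). Patience sort runs in O(n log n): each
# element costs one binary search over the stacks, and the final heap-based
# merge is linearithmic in the total number of elements:
if __name__ == "__main__":
    assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]
    assert patience_sort([]) == []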
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : str = ["pixel_values"]
def __init__( self , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = PIL.Image.BICUBIC , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = 1 / 255 , UpperCAmelCase__ = True , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
super().__init__(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = size if size is not None else {"height": 256, "width": 256}
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase__ , param_name="crop_size" )
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_center_crop
SCREAMING_SNAKE_CASE__ = crop_size
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = PIL.Image.BICUBIC , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
UpperCAmelCase__ , size=(size["height"], size["width"]) , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(UpperCAmelCase__ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ):
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__=None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = ChannelDimension.FIRST , **UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase__ , param_name="crop_size" )
SCREAMING_SNAKE_CASE__ = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
SCREAMING_SNAKE_CASE__ = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
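# Illustrative usage (a sketch; it assumes the class above is a DeiT-style
# image processor from transformers -- a 256x256 bicubic resize followed by a
# 224x224 center crop matches DeiT's defaults; identifiers were machine-renamed):
if __name__ == "__main__":
    import numpy as np
    from transformers import DeiTImageProcessor

    image_processor = DeiTImageProcessor()
    image = np.zeros((512, 640, 3), dtype=np.uint8)  # a dummy HWC image
    batch = image_processor(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)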
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
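# Adding a new subcommand follows the same pattern as the registrations in
# `main()` above: a `*_command_parser(subparsers=None)` helper adds a parser
# and binds a handler via `set_defaults`. An illustrative sketch (the `hello`
# command is hypothetical):
#
#     def hello_command_parser(subparsers=None):
#         parser = subparsers.add_parser("hello")
#         parser.set_defaults(func=lambda args: print("Hello from accelerate!"))
#         return parser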
def counting_sort(collection: list) -> list:
    # if the collection is empty, return empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string: str) -> str:
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
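# Additional illustrative checks. Counting sort runs in O(n + k) time and
# space, where k = max(collection) - min(collection) + 1, and the
# implementation above is stable:
if __name__ == "__main__":
    assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
    assert counting_sort([-2, -5, -45]) == [-45, -5, -2]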
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _lowerCAmelCase ( _lowercase ):
A__ = 'donut-swin'
A__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : List[str] = patch_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Optional[Any] = embed_dim
lowerCAmelCase__ : int = depths
lowerCAmelCase__ : Dict = len(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = num_heads
lowerCAmelCase__ : Dict = window_size
lowerCAmelCase__ : str = mlp_ratio
lowerCAmelCase__ : Optional[int] = qkv_bias
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = drop_path_rate
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : List[str] = use_absolute_embeddings
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
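# Illustrative usage (a sketch; it assumes the class above corresponds to
# transformers' DonutSwinConfig, per the "donut-swin" model_type):
if __name__ == "__main__":
    from transformers import DonutSwinConfig, DonutSwinModel

    configuration = DonutSwinConfig()
    model = DonutSwinModel(configuration)
    # hidden_size = embed_dim * 2**(len(depths) - 1) = 96 * 8 = 768
    print(configuration.hidden_size)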
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCamelCase ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ , a_ = None , ) -> Optional[Any]:
super().__init__()
self.register_modules(transformer=a_ , vae=a_ , scheduler=a_ )
# create a imagenet -> id dictionary for easier use
lowercase : Optional[int] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
lowercase : List[Any] = int(a_ )
lowercase : Dict = dict(sorted(self.labels.items() ) )
def a__ ( self , a_ ) -> List[int]:
if not isinstance(a_ , a_ ):
lowercase : Optional[Any] = list(a_ )
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , a_ , a_ = 4.0 , a_ = None , a_ = 5_0 , a_ = "pil" , a_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
lowercase : Tuple = len(a_ )
lowercase : List[Any] = self.transformer.config.sample_size
lowercase : str = self.transformer.config.in_channels
lowercase : Optional[Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=a_ , device=self.device , dtype=self.transformer.dtype , )
lowercase : Any = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase : Union[str, Any] = torch.tensor(a_ , device=self.device ).reshape(-1 )
lowercase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device )
lowercase : Tuple = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(a_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase : Optional[Any] = latent_model_input[: len(a_ ) // 2]
lowercase : Tuple = torch.cat([half, half] , dim=0 )
lowercase : Optional[int] = self.scheduler.scale_model_input(a_ , a_ )
lowercase : List[Any] = t
if not torch.is_tensor(a_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase : int = latent_model_input.device.type == "mps"
if isinstance(a_ , a_ ):
lowercase : List[Any] = torch.floataa if is_mps else torch.floataa
else:
lowercase : Dict = torch.intaa if is_mps else torch.intaa
lowercase : Optional[Any] = torch.tensor([timesteps] , dtype=a_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase : Optional[int] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase : Tuple = self.transformer(
a_ , timestep=a_ , class_labels=a_ ).sample
# perform guidance
if guidance_scale > 1:
lowercase , lowercase : Tuple = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase , lowercase : str = torch.split(a_ , len(a_ ) // 2 , dim=0 )
lowercase : Union[str, Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase : Optional[int] = torch.cat([half_eps, half_eps] , dim=0 )
lowercase : Optional[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase , lowercase : Union[str, Any] = torch.split(a_ , a_ , dim=1 )
else:
lowercase : str = noise_pred
# compute previous image: x_t -> x_t-1
lowercase : Optional[int] = self.scheduler.step(a_ , a_ , a_ ).prev_sample
if guidance_scale > 1:
lowercase , lowercase : Any = latent_model_input.chunk(2 , dim=0 )
else:
lowercase : List[str] = latent_model_input
lowercase : List[str] = 1 / self.vae.config.scaling_factor * latents
lowercase : str = self.vae.decode(a_ ).sample
lowercase : Union[str, Any] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase : Optional[Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase : Union[str, Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=a_ )
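# Illustrative generation loop (a sketch; it assumes the class above is
# diffusers' DiTPipeline, whose identifiers were machine-renamed here, and
# that a CUDA device is available):
if __name__ == "__main__":
    import torch
    from diffusers import DiTPipeline

    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")

    class_ids = pipe.get_label_ids(["white shark", "umbrella"])  # ImageNet label lookup
    generator = torch.manual_seed(33)
    images = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images
    images[0].save("white_shark.png")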
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCAmelCase : Dict = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _A ( A ,A ,A ,A ,A ) -> str:
for attribute in key.split("." ):
lowercase : Any = getattr(A ,A )
if weight_type is not None:
lowercase : Optional[Any] = getattr(A ,A ).shape
else:
lowercase : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase : Any = value
elif weight_type == "weight_g":
lowercase : Optional[Any] = value
elif weight_type == "weight_v":
lowercase : Tuple = value
elif weight_type == "bias":
lowercase : int = value
else:
lowercase : int = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _A ( A ,A ) -> int:
lowercase : List[Any] = []
lowercase : int = fairseq_model.state_dict()
lowercase : Optional[Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowercase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
A ,A ,A ,A ,hf_model.config.feat_extract_norm == "group" ,)
lowercase : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase : Union[str, Any] = True
if "*" in mapped_key:
lowercase : Dict = name.split(A )[0].split("." )[-2]
lowercase : Union[str, Any] = mapped_key.replace("*" ,A )
if "weight_g" in name:
lowercase : Union[str, Any] = "weight_g"
elif "weight_v" in name:
lowercase : Tuple = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
lowercase : Union[str, Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase : Any = "weight"
else:
lowercase : Tuple = None
set_recursively(A ,A ,A ,A ,A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _A ( A ,A ,A ,A ,A ) -> Any:
lowercase : Optional[int] = full_name.split("conv_layers." )[-1]
lowercase : Any = name.split("." )
lowercase : Dict = int(items[0] )
lowercase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase : List[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A )
@torch.no_grad()
def _A ( A ,A ,A=None ) -> Optional[Any]:
# load the pre-trained checkpoints
lowercase : Union[str, Any] = torch.load(A )
lowercase : List[Any] = WavLMConfigOrig(checkpoint["cfg"] )
lowercase : Tuple = WavLMOrig(A )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
lowercase : List[str] = WavLMConfig.from_pretrained(A )
else:
lowercase : Union[str, Any] = WavLMConfig()
lowercase : Optional[Any] = WavLMModel(A )
recursively_load_weights(A ,A )
hf_wavlm.save_pretrained(A )
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCAmelCase : int = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
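# Typical invocation (illustrative; the script name, checkpoint path, and
# output folder are placeholders -- the original WavLM checkpoints come from
# https://github.com/microsoft/unilm/tree/master/wavlm):
#
#     python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./WavLM-Base-plus.pt \
#         --pytorch_dump_folder_path ./wavlm-base-plus-converted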
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class a ( a__ ):
snake_case__ = ['''input_features''', '''is_longer''']
def __init__( self , _snake_case=64 , _snake_case=4_80_00 , _snake_case=4_80 , _snake_case=10 , _snake_case=10_24 , _snake_case=0.0 , _snake_case=False , _snake_case = 0 , _snake_case = 1_40_00 , _snake_case = None , _snake_case = "fusion" , _snake_case = "repeatpad" , **_snake_case , ):
"""simple docstring"""
super().__init__(
feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
lowerCAmelCase = top_db
lowerCAmelCase = truncation
lowerCAmelCase = padding
lowerCAmelCase = fft_window_size
lowerCAmelCase = (fft_window_size >> 1) + 1
lowerCAmelCase = hop_length
lowerCAmelCase = max_length_s
lowerCAmelCase = max_length_s * sampling_rate
lowerCAmelCase = sampling_rate
lowerCAmelCase = frequency_min
lowerCAmelCase = frequency_max
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_snake_case , min_frequency=_snake_case , max_frequency=_snake_case , sampling_rate=_snake_case , norm=_snake_case , mel_scale='htk' , )
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_snake_case , min_frequency=_snake_case , max_frequency=_snake_case , sampling_rate=_snake_case , norm='slaney' , mel_scale='slaney' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__ )
lowerCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = spectrogram(
_snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_snake_case , log_mel='dB' , )
return log_mel_spectrogram.T
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase = [0]
# randomly choose index for each part
lowerCAmelCase = np.random.choice(ranges[0] )
lowerCAmelCase = np.random.choice(ranges[1] )
lowerCAmelCase = np.random.choice(ranges[2] )
lowerCAmelCase = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase = torch.tensor(mel[None, None, :] )
lowerCAmelCase = torch.nn.functional.interpolate(
_snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=_snake_case )
lowerCAmelCase = mel_shrink[0][0].numpy()
lowerCAmelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase = len(_snake_case ) - max_length
lowerCAmelCase = np.random.randint(0 , overflow + 1 )
lowerCAmelCase = waveform[idx : idx + max_length]
lowerCAmelCase = self._np_extract_fbank_features(_snake_case , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase = self._np_extract_fbank_features(_snake_case , self.mel_filters )
lowerCAmelCase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase = np.stack([mel, mel, mel, mel] , axis=0 )
lowerCAmelCase = False
else:
lowerCAmelCase = self._random_mel_fusion(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented' )
else:
lowerCAmelCase = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase = int(max_length / len(_snake_case ) )
lowerCAmelCase = np.stack(np.tile(_snake_case , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase = int(max_length / len(_snake_case ) )
lowerCAmelCase = np.stack(np.tile(_snake_case , _snake_case ) )
lowerCAmelCase = np.pad(_snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
lowerCAmelCase = self._np_extract_fbank_features(_snake_case , self.mel_filters )
lowerCAmelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowerCAmelCase = self._np_extract_fbank_features(_snake_case , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = truncation if truncation is not None else self.truncation
lowerCAmelCase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCAmelCase = isinstance(_snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowerCAmelCase = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray(_snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
lowerCAmelCase = np.asarray(_snake_case , dtype=np.floataa )
elif isinstance(_snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray(_snake_case )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase = [
self._get_input_mel(_snake_case , max_length if max_length else self.nb_max_samples , _snake_case , _snake_case )
for waveform in raw_speech
]
lowerCAmelCase = []
lowerCAmelCase = []
for mel, longer in padded_inputs:
input_mel.append(_snake_case )
is_longer.append(_snake_case )
if truncation == "fusion" and sum(_snake_case ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase = np.random.randint(0 , len(_snake_case ) )
lowerCAmelCase = True
if isinstance(input_mel[0] , _snake_case ):
lowerCAmelCase = [np.asarray(_snake_case , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase = [[longer] for longer in is_longer]
lowerCAmelCase = {'input_features': input_mel, 'is_longer': is_longer}
lowerCAmelCase = BatchFeature(_snake_case )
if return_tensors is not None:
lowerCAmelCase = input_features.convert_to_tensors(_snake_case )
return input_features
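# Illustrative usage (a sketch; it assumes the class above is transformers'
# ClapFeatureExtractor, whose identifiers were machine-renamed here):
if __name__ == "__main__":
    import numpy as np
    from transformers import ClapFeatureExtractor

    feature_extractor = ClapFeatureExtractor()
    audio = np.zeros(48_000 * 12, dtype=np.float32)  # 12 s of silence at 48 kHz
    inputs = feature_extractor(audio, sampling_rate=48_000, return_tensors="np")
    # With the default "fusion" truncation, clips longer than 10 s yield four
    # stacked mel views, roughly (batch, 4, frames, 64):
    print(inputs["input_features"].shape, inputs["is_longer"])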
import requests
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a : str = logging.get_logger(__name__)
class __A (__magic_name__ ):
snake_case :Optional[Any] = ["pixel_values"]
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : List[str] = size if size is not None else {"shortest_edge": 2_56}
__UpperCAmelCase : int = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__UpperCAmelCase : Any = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
__UpperCAmelCase : str = get_size_dict(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = do_resize
__UpperCAmelCase : Dict = size
__UpperCAmelCase : Dict = resample
__UpperCAmelCase : str = do_center_crop
__UpperCAmelCase : Union[str, Any] = crop_size
__UpperCAmelCase : List[str] = do_rescale
__UpperCAmelCase : List[Any] = rescale_factor
__UpperCAmelCase : List[Any] = do_normalize
__UpperCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : List[str] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__UpperCAmelCase : Optional[int] = get_resize_output_image_size(UpperCamelCase_ , size=size["shortest_edge"] , default_to_square=UpperCamelCase_ )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : List[str] = get_size_dict(UpperCamelCase_ )
return center_crop(UpperCamelCase_ , size=(size["height"], size["width"]) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ):
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ):
__UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : Union[str, Any] = size if size is not None else self.size
__UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__UpperCAmelCase : Tuple = resample if resample is not None else self.resample
__UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase : Dict = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase : List[Any] = get_size_dict(UpperCamelCase_ )
__UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
__UpperCAmelCase : Optional[Any] = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__UpperCAmelCase : List[Any] = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
__UpperCAmelCase : Tuple = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_center_crop:
__UpperCAmelCase : Optional[int] = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
if do_rescale:
__UpperCAmelCase : Optional[Any] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
__UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
__UpperCAmelCase : List[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__UpperCAmelCase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
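# The functional transforms imported at the top of this file can be chained
# directly; an illustrative sketch of the same preprocessing pipeline the
# class implements (default shortest-edge-256 resize plus a 224x224 crop):
if __name__ == "__main__":
    import numpy as np

    image = np.zeros((480, 640, 3), dtype=np.uint8)  # a dummy HWC image
    output_size = get_resize_output_image_size(image, size=256, default_to_square=False)
    resized = resize(image, size=output_size)        # shortest edge -> 256
    cropped = center_crop(resized, size=(224, 224))  # crop to the model input size
    scaled = rescale(cropped, scale=1 / 255)
    normalized = normalize(scaled, mean=IMAGENET_STANDARD_MEAN, std=IMAGENET_STANDARD_STD)
    print(normalized.shape)  # (224, 224, 3)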
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
_a : Tuple = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
_a : List[Any] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to unicode strings, avoiding the
    whitespace/control characters that BPE code chokes on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # pick the merge with the lowest rank among the candidate pairs
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
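# Minimal usage sketch (illustrative; assumes local vocab.json / merges.txt
# files, which are not shipped with this module):
#
#   tokenizer = BartTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   ids = tokenizer("Hello world")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))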
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum) -> None:
    # Prune: the current path already overshoots max_sum, or even taking every
    # remaining number cannot reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index])


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)  # [3, 4, 2] [4, 5]
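
# Cross-check (an added sketch, not part of the original file): brute-forcing
# every subset with itertools.combinations should find the same sums as the
# backtracking search above.
from itertools import combinations

brute = [list(c) for r in range(len(nums) + 1) for c in combinations(nums, r) if sum(c) == max_sum]
print(brute)  # [[4, 5], [3, 4, 2]] -- same subsets, up to ordering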
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """
    Return True iff the string is a dotted-quad IPv4 address: four numeric
    octets, each in the range 0-255.
    """
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
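    # A few illustrative checks (an added sketch, not in the original script):
    assert is_ip_v4_address_valid("192.168.0.23")
    assert not is_ip_v4_address_valid("192.256.15.8")  # 256 is out of range
    assert not is_ip_v4_address_valid("172.100.0.8.50")  # five octets
    assert not is_ip_v4_address_valid("12.34.56.oops")  # non-numeric octet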
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/imagegpt-small': '',
    'openai/imagegpt-medium': '',
    'openai/imagegpt-large': '',
}
class ImageGPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ])

    def generate_dummy_inputs( self , preprocessor , batch_size=1 , seq_length=-1 , is_pair=False , framework=None , num_channels=3 , image_width=32 , image_height=32 , ):
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
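# Minimal usage sketch (illustrative values; the class above mirrors the
# upstream transformers ImageGPTConfig):
#
#   config = ImageGPTConfig(n_embd=256, n_layer=6, n_head=8)
#   assert config.hidden_size == 256  # attribute_map aliases n_embd as hidden_size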
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float16, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float16, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
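# Usage sketch for the pipeline exercised by these tests (illustrative; needs a
# GPU and downloads the CompVis/ldm-text2im-large-256 weights):
#
#   pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to("cuda")
#   image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
#   image.save("squirrel.png")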
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """simple docstring"""
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    state_dict = state_dict_for_hugging_face
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    """simple docstring"""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
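# Sketch of the expected entity-vocab input (JSON lines; illustrative values):
#   {"id": 42, "entities": [["Tokyo", "en"], ["東京", "ja"]]}
# which the loader above turns into {"en:Tokyo": 42, "ja:東京": 42}; the special
# tokens [MASK], [PAD] and [UNK] keep their bare names.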
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
    )
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """simple docstring"""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def rename_key(name):
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
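# Worked example of the renaming rules above (derived by tracing the function):
#   rename_key("layers.0.blocks.1.modulation.f.weight")
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"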
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
# verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06], std=[0.2_29, 0.2_24, 0.2_25]),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.16_69, 0.01_25, -0.16_95])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.49_17, -0.04_30, 0.13_41])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.25_88, -0.53_42, -0.23_31])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.16_55, -0.40_90, -0.17_30])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.53_06, -0.04_83, -0.39_28])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to("cuda")\n\n        >>> prompt = "A red cartoon frog, 4k"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to("cuda")\n\n        >>> init_image = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/frog.png"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save("red_frog.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Map pixel dimensions to MoVQ latent dimensions, rounding up to a multiple of scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
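# Traced examples of the helper above: downscale_height_and_width(768, 768)
# returns (96, 96), while a non-multiple like (500, 500) rounds up to (64, 64).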
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a [-1, 1] float tensor of shape (1, 3, h, w)."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        """simple docstring"""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """simple docstring"""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , strength: float = 0.3 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        """simple docstring"""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
'''simple docstring'''


def fizz_buzz(number: int, iterations: int) -> str:
    """
    Plays FizzBuzz.
    Prints Fizz if number is a multiple of 3.
    Prints Buzz if number is a multiple of 5.
    Prints FizzBuzz if number is a multiple of both 3 and 5.
    Else prints the number itself.
    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += ' '
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_shape_file'])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_pronunciation_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file, 'w', encoding='utf-8') as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, 'w', encoding='utf-8') as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(tokens, ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode('你好', add_special_tokens=False)
        text_2 = tokenizer.encode('你是谁', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                string_sequence = '你好,你是谁'
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no counterpart in the HF checkpoint."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer whose weights are tied to an embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """simple docstring"""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
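# Worked example of the key renaming above (traced; expert_idx=7):
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"
#   -> "decoder.layers.3.ffn.experts.expert_7.fc1.weight"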
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """simple docstring"""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
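# The index built above mirrors the standard HF sharded-checkpoint layout
# (illustrative entry; shard counts and sizes depend on the run):
#   {"metadata": {"total_size": 123456789},
#    "weight_map": {"decoder.layers.0.ffn.fc1.weight": "pytorch_model-00129-of-00129.bin", ...}}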
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 720 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
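# This script flattens Natural Questions examples into fixed-length, strided token
# windows for BigBird-style QA training and writes them out as jsonlines.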
def _get_single_answer(example):
    """Pick a single gold answer (yes/no, short, or long) from an NQ annotation."""
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])
    return answer
def get_context_and_ans(example, assertion=False):
    """Rebuild the plain-text context (HTML tokens removed) and shift answer offsets."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
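# Window arithmetic: with max_length=4096 and doc_stride=2048, context windows start
# at q_len, q_len + 2048, q_len + 4096, ... and each window holds the question tokens
# plus up to (4096 - q_len) context tokens, so consecutive windows overlap by roughly
# 2048 tokens.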
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize question + context and split overly long inputs into overlapping windows."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )
    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            # window-local copies so later windows still compare against the
            # original answer positions
            window_start_token = start_token - i + q_len
            window_end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            window_start_token = -100
            window_end_token = -100
            answers_category.append("null")
        new = inputs[-1][window_start_token : window_end_token + 1]

        answers_start_token.append(window_start_token)
        answers_end_token.append(window_end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    """Dataset.map-compatible wrapper around get_strided_contexts_and_ans."""
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    """Append processed samples to a jsonlines file, dropping unusable ones."""
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
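# Each emitted jsonl line looks like (values illustrative):
#   {"input_ids": [...], "start_token": 57, "end_token": 63, "category": 1}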
if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 562 | 0 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
else:
    # stub so module-level references to Image.open still resolve without vision deps
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image) -> str:
    """Cheap content fingerprint for comparing rendered images in tests."""
    m = hashlib.md5(image.tobytes())  # hashlib has no "mda"; md5 restored
    return m.hexdigest()
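# Note: md5 here is only a convenience fingerprint for test comparisons, not a
# security primitive.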
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT")
| 361 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize text queries and/or preprocess (query) images into one BatchEncoding."""
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        """Forwards all arguments to the image processor's ``post_process``."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forwards all arguments to the image processor's ``post_process_object_detection``."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forwards all arguments to the image processor's ``post_process_image_guided_detection``."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
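# Minimal usage sketch (checkpoint name illustrative of the OwlViT family):
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")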
| 470 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop('image_processor_type')
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'clip-base is not a local folder and is not a valid model identifier'
        ):
            _ = AutoImageProcessor.from_pretrained('clip-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.',
        ):
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, 'NewImageProcessor')
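    # AutoConfig.register/AutoImageProcessor.register pair a custom config class with
    # a custom image-processor class so the Auto* factories can resolve it; the
    # try/finally blocks below undo the registration so other tests see a clean mapping.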
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
                config_tmpfile = Path(tmpdirname) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'},
                    open(processor_tmpfile, 'w'),
                )
                json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(not hasattr(image_processor, 'is_local'))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = 'gpt_neox'

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act='gelu',
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!')
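    # Illustrative example of the rope_scaling contract validated below:
    #   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})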
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration; messages match the keys the code reads."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 665 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
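# Each README_* fixture above pairs with an EXPECTED_ERROR_* message; {path} in the
# message is filled with either "root" (from_string) or the temporary README path
# (from_readme) by the parametrized tests below.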
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 598 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
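# Each dict above parameterizes one full training run (framework, entry script,
# base model, instance type, and the thresholds asserted against below).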
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version='py36',
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
| 598 | 1 |
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('T')


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph; directed by default, configurable via the constructor."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
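# Minimal usage sketch:
#   graph = GraphAdjacencyList[int](directed=False)
#   graph.add_edge(1, 2).add_edge(2, 3)
#   print(graph)  # {1: [2], 2: [1, 3], 3: [2]}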
| 707 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
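# SAMPLE_VOCAB points at a tiny SentencePiece model shipped with the test fixtures,
# so the unit tests below run without downloading a real checkpoint.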
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 10_02)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_02)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]], )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves the same files plus the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
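    # Note (added for clarity, not in the original test): legacy_format=True forces the slow-tokenizer
    # file layout (separate vocab/special-tokens files, no tokenizer.json), while legacy_format=False
    # saves only the unified tokenizer.json that the fast tokenizer consumes.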
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            # SAMPLE_VOCAB is assumed to be the module-level test fixture path; the original name was scrambled.
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3", )
| 661 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
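# Worked example of the shard-size arithmetic above (illustration only, not part of the test):
# 100 rows x 8 bytes = 800 bytes total; ceil(800 / 16) = 50 partitions, i.e. 2 rows per partition.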
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 35 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search over array[left:right]; return -1 if target is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; falls back to linear search below the precision threshold."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; falls back to linear search below the precision threshold."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
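# Minimal usage sketch (added for illustration; the sample list is an assumption):
# >>> ite_ternary_search([0, 2, 4, 8, 16, 32, 64], 8)
# 3
# >>> rec_ternary_search(0, 6, [0, 2, 4, 8, 16, 32, 64], 8)
# 3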
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 275 | 0 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir, config_name=config_name, hash=dataset_module.hash, )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ])
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir, config_name="20220301.frr", hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path, config_name="20220301.frr", hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
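# Illustrative shape of a streamed example from the test above (assumed wikipedia schema):
# next(iter(ds["train"])) -> {"id": ..., "url": ..., "title": ..., "text": ...}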
| 710 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
snake_case_ : Dict = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
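# Behavior sketch (added for illustration): the deprecated class still works, it just warns once:
# extractor = DeformableDetrFeatureExtractor()  # emits a FutureWarning pointing to DeformableDetrImageProcessor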
| 169 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
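# Example mappings produced by rename_key (added for illustration):
# "blocks.0.attn.proj.weight"   -> "vit.encoder.layer.0.attention.output.dense.weight"
# "decoder_blocks.1.norm1.bias" -> "decoder.decoder_layers.1.layernorm_before.bias"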
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            # NOTE: the q/k/v target paths below follow the standard HF ViT naming
            # ({prefix}{layer}.attention.attention.{query,key,value}); they were reconstructed
            # from that convention, as the original assignment targets were garbled.
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
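# Typical invocation (added for illustration; assumes this script is saved as
# convert_vit_mae_to_pytorch.py, and the checkpoint URL is the default defined above):
# python convert_vit_mae_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#     --pytorch_dump_folder_path ./vit-mae-base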
| 579 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text
    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
return output
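# Minimal usage sketch for the Conversation container above (added for illustration):
# conversation = Conversation("Going to the movies tonight - any suggestions?")
# conversation.mark_processed()                       # move the pending input into the history
# conversation.append_response("The Big Lebowski")    # record the bot's reply
# conversation.add_user_input("Is it an action movie?")
# print(conversation)  # "user >> ..." / "bot >> ..." transcript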
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ', )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
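# End-to-end sketch of the pipeline above (added for illustration; the model name is an assumption):
# from transformers import pipeline
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
# conversation = chatbot(Conversation("Hi, how are you?"))
# print(conversation.generated_responses[-1])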
| 579 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        # local_files_only=False is an assumption (the library default); the original flag value was garbled.
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float16, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 721 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]="" , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE ):
index += 1
lowerCAmelCase : Dict = ["\n".join(lines[:index] )]
else:
lowerCAmelCase : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
if index < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : List[str] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : Optional[Any] = []
else:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
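# Example transformation (added for illustration): constants sort first, then classes, then
# functions, ignoring underscores, so
#   'key: ["ZebraModel", "apple_fn", "CONSTANT"]'
# becomes
#   'key: ["CONSTANT", "ZebraModel", "apple_fn"]'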
def sort_imports(file, check_only=True):
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
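# Typical invocations (added for illustration; assumes the script lives at utils/custom_init_isort.py):
#   python utils/custom_init_isort.py --check_only   # raise if any __init__.py needs re-sorting
#   python utils/custom_init_isort.py                # rewrite the offending files in place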
| 681 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 117 |
def circle_sort(collection: list) -> list:
    """Sort a list in place using the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
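# Quick sanity check (added for illustration):
# >>> circle_sort([5, 3, 1, 4, 2])
# [1, 2, 3, 4, 5]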
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 117 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Solve y' = f(x, y) with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split)
| 68 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_roformer_fast'''] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roformer'''] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
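

# Minimal standalone sketch of the same lazy-import idea (hypothetical names,
# independent of transformers): attribute access triggers the real import, so
# the top-level package import stays cheap.
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     mod = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(mod, attr)
#             raise AttributeError(attr)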
| 282 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Directed graph whose outgoing edge weights are transition probabilities."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
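    # Illustrative run (transition values are an assumption, not from the file):
    # a two-state chain that mostly stays in state "a".
    chain = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", chain, 1000))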
| 282 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
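

# Illustrative sketch (not part of the original module): split a 12-layer model
# across two devices and validate the result.
if __name__ == "__main__":
    device_map = get_device_map(n_layers=12, devices=[0, 1])
    print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
    assert_device_map(device_map, num_blocks=12)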
| 146 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            kv_dim = config.hidden_sizes[i]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[:kv_dim, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[:kv_dim]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[kv_dim:, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[kv_dim:]
# We will verify our results on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """
    Copy/paste/tweak the original model's weights into our GLPN structure.
    """

    # load GLPN configuration (Segformer-B4 size)
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
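    # Example invocation from the shell (paths are illustrative placeholders):
    #
    #   python convert_glpn_to_pytorch.py \
    #       --checkpoint_path ./glpn_kitti.pth \
    #       --pytorch_dump_folder_path ./glpn-kitti-hf \
    #       --model_name glpn-kitti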
| 26 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
"""simple docstring"""
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : int = CrossAttnDownBlockaD # noqa F405
UpperCamelCase_ : Optional[int] = '''down'''
def lowercase ( self : List[Any] ) -> Dict:
__snake_case , __snake_case = super().prepare_init_args_and_inputs_for_common()
__snake_case = 32
return init_dict, inputs_dict
def lowercase ( self : Union[str, Any] ) -> Tuple:
__snake_case = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = SimpleCrossAttnDownBlockaD # noqa F405
UpperCamelCase_ : Dict = '''down'''
@property
def lowercase ( self : Tuple ) -> Optional[Any]:
return super().get_dummy_input(include_encoder_hidden_states=A_ )
def lowercase ( self : List[str] ) -> Union[str, Any]:
__snake_case , __snake_case = super().prepare_init_args_and_inputs_for_common()
__snake_case = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = SkipDownBlockaD # noqa F405
UpperCamelCase_ : Dict = '''down'''
@property
def lowercase ( self : Optional[int] ) -> Dict:
return super().get_dummy_input(include_skip_sample=A_ )
def lowercase ( self : Any ) -> Dict:
__snake_case = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = AttnSkipDownBlockaD # noqa F405
UpperCamelCase_ : List[str] = '''down'''
@property
def lowercase ( self : Union[str, Any] ) -> Tuple:
return super().get_dummy_input(include_skip_sample=A_ )
def lowercase ( self : Optional[int] ) -> List[Any]:
__snake_case = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : int = DownEncoderBlockaD # noqa F405
UpperCamelCase_ : Optional[int] = '''down'''
@property
def lowercase ( self : Dict ) -> Any:
return super().get_dummy_input(include_temb=A_ )
def lowercase ( self : Dict ) -> int:
__snake_case = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def lowercase ( self : Optional[int] ) -> Optional[Any]:
__snake_case = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = AttnDownEncoderBlockaD # noqa F405
UpperCamelCase_ : Union[str, Any] = '''down'''
@property
def lowercase ( self : Tuple ) -> Dict:
return super().get_dummy_input(include_temb=A_ )
def lowercase ( self : Any ) -> Optional[Any]:
__snake_case = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def lowercase ( self : int ) -> str:
__snake_case = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = UNetMidBlockaD # noqa F405
UpperCamelCase_ : Any = '''mid'''
def lowercase ( self : List[str] ) -> Dict:
__snake_case = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def lowercase ( self : Dict ) -> Union[str, Any]:
__snake_case = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = UNetMidBlockaDCrossAttn # noqa F405
UpperCamelCase_ : Tuple = '''mid'''
def lowercase ( self : Any ) -> str:
__snake_case , __snake_case = super().prepare_init_args_and_inputs_for_common()
__snake_case = 32
return init_dict, inputs_dict
def lowercase ( self : int ) -> Optional[int]:
__snake_case = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
UpperCamelCase_ : Optional[int] = '''mid'''
@property
def lowercase ( self : Dict ) -> Optional[Any]:
return super().get_dummy_input(include_encoder_hidden_states=A_ )
def lowercase ( self : List[str] ) -> List[Any]:
__snake_case , __snake_case = super().prepare_init_args_and_inputs_for_common()
__snake_case = 32
return init_dict, inputs_dict
def lowercase ( self : Optional[Any] ) -> List[str]:
__snake_case = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = UpBlockaD # noqa F405
UpperCamelCase_ : str = '''up'''
@property
def lowercase ( self : Any ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
def lowercase ( self : int ) -> Optional[Any]:
__snake_case = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : int = ResnetUpsampleBlockaD # noqa F405
UpperCamelCase_ : int = '''up'''
@property
def lowercase ( self : str ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
def lowercase ( self : Optional[Any] ) -> Any:
__snake_case = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = CrossAttnUpBlockaD # noqa F405
UpperCamelCase_ : Dict = '''up'''
@property
def lowercase ( self : List[Any] ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
def lowercase ( self : Optional[int] ) -> Optional[Any]:
__snake_case , __snake_case = super().prepare_init_args_and_inputs_for_common()
__snake_case = 32
return init_dict, inputs_dict
def lowercase ( self : List[Any] ) -> int:
__snake_case = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
UpperCamelCase_ : Any = '''up'''
@property
def lowercase ( self : str ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=A_ , include_encoder_hidden_states=A_ )
def lowercase ( self : str ) -> str:
__snake_case , __snake_case = super().prepare_init_args_and_inputs_for_common()
__snake_case = 32
return init_dict, inputs_dict
def lowercase ( self : int ) -> Optional[int]:
__snake_case = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = AttnUpBlockaD # noqa F405
UpperCamelCase_ : Optional[int] = '''up'''
@property
def lowercase ( self : Union[str, Any] ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowercase ( self : str ) -> str:
__snake_case = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] = SkipUpBlockaD # noqa F405
UpperCamelCase_ : Any = '''up'''
@property
def lowercase ( self : Optional[int] ) -> int:
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
def lowercase ( self : Union[str, Any] ) -> str:
__snake_case = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = AttnSkipUpBlockaD # noqa F405
UpperCamelCase_ : str = '''up'''
@property
def lowercase ( self : Union[str, Any] ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
def lowercase ( self : Optional[Any] ) -> List[str]:
__snake_case = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = UpDecoderBlockaD # noqa F405
UpperCamelCase_ : Tuple = '''up'''
@property
def lowercase ( self : List[str] ) -> Optional[int]:
return super().get_dummy_input(include_temb=A_ )
def lowercase ( self : int ) -> Any:
__snake_case = {'''in_channels''': 32, '''out_channels''': 32}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def lowercase ( self : List[str] ) -> Union[str, Any]:
__snake_case = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(A_ )
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = AttnUpDecoderBlockaD # noqa F405
UpperCamelCase_ : Any = '''up'''
@property
def lowercase ( self : List[Any] ) -> str:
return super().get_dummy_input(include_temb=A_ )
def lowercase ( self : Optional[Any] ) -> Dict:
__snake_case = {'''in_channels''': 32, '''out_channels''': 32}
__snake_case = self.dummy_input
return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 93 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
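    # A couple of illustrative negative cases (not in the original file):
    print(indian_phone_validator("+9112345678"))     # False: not a valid 10-digit mobile number
    print(indian_phone_validator("00918827897895"))  # False: bad country-code prefix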
    print(indian_phone_validator("+918827897895"))
| 93 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    Generates the boolean patch mask for the SimMIM pretraining objective.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
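

# Illustrative check (not part of the original script): the generated mask
# should cover roughly `mask_ratio` of the patch tokens.
#
#     gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
#     print(gen().float().mean())  # ~0.6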
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim', lowercase__, lowercase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase ={
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
__lowercase =AutoConfig.from_pretrained(model_args.config_name_or_path, **lowercase__ )
elif model_args.model_name_or_path:
__lowercase =AutoConfig.from_pretrained(model_args.model_name_or_path, **lowercase__ )
else:
__lowercase =CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowercase__, 'decoder_type' ):
__lowercase ='simmim'
# adapt config
__lowercase =model_args.image_size if model_args.image_size is not None else config.image_size
__lowercase =model_args.patch_size if model_args.patch_size is not None else config.patch_size
__lowercase =(
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
__lowercase =AutoImageProcessor.from_pretrained(model_args.image_processor_name, **lowercase__ )
elif model_args.model_name_or_path:
__lowercase =AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **lowercase__ )
else:
__lowercase ={
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
__lowercase =IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
__lowercase =AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=lowercase__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info('Training new model from scratch' )
__lowercase =AutoModelForMaskedImageModeling.from_config(lowercase__ )
if training_args.do_train:
__lowercase =ds['train'].column_names
else:
__lowercase =ds['validation'].column_names
if data_args.image_column_name is not None:
__lowercase =data_args.image_column_name
elif "image" in column_names:
__lowercase ='image'
elif "img" in column_names:
__lowercase ='img'
else:
__lowercase =column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
__lowercase =Compose(
[
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean, std=image_processor.image_std ),
] )
# create mask generator
__lowercase =MaskGenerator(
input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, )
    def preprocess_images(examples):
        """Apply the transforms to each image and generate a corresponding patch mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__lowercase =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__lowercase =(
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
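

# Example launch (illustrative; paths and hyperparameters are placeholders):
#
#     python run_mim.py \
#         --model_type vit \
#         --dataset_name cifar10 \
#         --output_dir ./simmim-output \
#         --do_train \
#         --do_eval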
| 119 |
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
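

# Illustrative cross-check (not in the original file) against math.comb (Python 3.8+):
import math

assert binomial_coefficient(10, 5) == math.comb(10, 5)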
| 119 | 1 |
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    # encode the input to a bytes-like object, then b85encode that
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    # decode the input with b85decode
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
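    # Round-trip property (illustrative, not in the original script):
    assert base85_decode(base85_encode(test)) == test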
| 36 |
"""simple docstring"""
def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36 | 1 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a sorted list, return the indices of the pair that sums to target."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
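    # Extra illustrative cases (input must be sorted):
    print(two_pointer([3, 3], 6))     # [0, 1]
    print(two_pointer([1, 2, 3], 7))  # [] (no pair sums to 7)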
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 32 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y = a * x + b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCAmelCase : int = DummyModel()
_lowerCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase , _lowerCAmelCase : str = dummy_dataloaders()
# Train baseline
_lowerCAmelCase : Tuple = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
_lowerCAmelCase : Union[str, Any] = os.path.join(snake_case_ , """initial""" )
accelerator.save_state(snake_case_ )
((_lowerCAmelCase) , (_lowerCAmelCase)) : List[Any] = model.a.item(), model.b.item()
_lowerCAmelCase : Dict = optimizer.state_dict()
_lowerCAmelCase : Optional[int] = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_lowerCAmelCase : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_lowerCAmelCase : List[str] = DummyModel()
_lowerCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase , _lowerCAmelCase : Dict = dummy_dataloaders()
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.load_state(snake_case_ )
((_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_lowerCAmelCase : Optional[int] = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save everything
_lowerCAmelCase : str = os.path.join(snake_case_ , """checkpoint""" )
accelerator.save_state(snake_case_ )
# Load everything back in and make sure all states work
accelerator.load_state(snake_case_ )
test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((_lowerCAmelCase) , (_lowerCAmelCase)) : str = model.a.item(), model.b.item()
_lowerCAmelCase : Any = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCAmelCase : str = DummyModel()
_lowerCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = dummy_dataloaders()
_lowerCAmelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ )
# Train baseline
_lowerCAmelCase : List[Any] = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
((_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = model.a.item(), model.b.item()
_lowerCAmelCase : List[Any] = optimizer.state_dict()
_lowerCAmelCase : Optional[Any] = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = model.a.item(), model.b.item()
_lowerCAmelCase : str = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_lowerCAmelCase : List[str] = DummyModel()
_lowerCAmelCase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = dummy_dataloaders()
_lowerCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=snake_case_ )
_lowerCAmelCase : Optional[int] = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) )
((_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_lowerCAmelCase : Union[str, Any] = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[int] = model.a.item(), model.b.item()
_lowerCAmelCase : str = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = torch.tensor([1, 2, 3] )
_lowerCAmelCase : Optional[Any] = torch.tensor([2, 3, 4] )
_lowerCAmelCase : Union[str, Any] = DummyModel()
_lowerCAmelCase : Tuple = torch.optim.Adam(net.parameters() )
_lowerCAmelCase : str = Accelerator()
with self.assertRaises(snake_case_ ) as ve:
accelerator.register_for_checkpointing(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCAmelCase : List[str] = DummyModel()
_lowerCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase : Tuple = torch.optim.lr_scheduler.StepLR(snake_case_ , step_size=1 , gamma=0.99 )
_lowerCAmelCase , _lowerCAmelCase : int = dummy_dataloaders()
_lowerCAmelCase : Dict = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ )
# Train baseline
_lowerCAmelCase : int = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
_lowerCAmelCase : Union[str, Any] = scheduler.state_dict()
train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
self.assertNotEqual(snake_case_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(snake_case_ , scheduler.state_dict() )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCAmelCase : str = DummyModel()
_lowerCAmelCase : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ , total_limit=2 )
# Train baseline
_lowerCAmelCase : Optional[Any] = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
_lowerCAmelCase : Any = accelerator.prepare(snake_case_ )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCamelCase_ = """/tmp/accelerate/state_checkpointing"""
UpperCamelCase_ = DummyModel()
UpperCamelCase_ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
UpperCamelCase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
UpperCamelCase_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCamelCase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCamelCase_ = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
UpperCamelCase_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
UpperCamelCase_ = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
UpperCamelCase_ = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
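
    # Hedged sketch (not Accelerate internals): the total_limit behavior tested
    # above amounts to keeping only the most recent checkpoints, as this toy
    # rotation shows. Names here are illustrative only.
    def rotate_checkpoints(existing, new_name, total_limit):
        existing = existing + [new_name]
        return existing[-total_limit:] if total_limit is not None else existing

    names = []
    for step in range(11):
        names = rotate_checkpoints(names, f"checkpoint_{step}", total_limit=2)
    assert names == ["checkpoint_9", "checkpoint_10"]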
| 384 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list for 0-1 BFS (edge weights restricted to 0 or 1)."""

    def __init__(self, size):
        self._graph = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex):
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex):
        # 0-1 BFS: weight-0 edges go to the front of the deque and weight-1 edges
        # to the back, so vertices are popped in non-decreasing distance order.
        queue = deque([start_vertex])
        distances = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
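
    # Hedged usage sketch (the graph below is illustrative): two routes from
    # vertex 0 to vertex 3, and the all-zero-weight route should win under 0-1 BFS.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 3, 0)
    g.add_edge(0, 2, 1)
    g.add_edge(2, 3, 1)
    assert g.get_shortest_path(0, 3) == 0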
| 360 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
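
# Hedged demo of instantiate_from_config with a made-up config dict: any
# importable "module.Class" target works, the one below is purely illustrative.
toy_config = {"target": "collections.OrderedDict", "params": {}}
toy_obj = instantiate_from_config(toy_config)  # imports collections, builds OrderedDict()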
| 360 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
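
# Design note (hedged): the SentencePiece processor wraps a C++ object that does
# not pickle, so __getstate__ drops it and __setstate__ reloads it from vocab_file:
#
#   import pickle
#   tok = ReformerTokenizer("spiece.model")   # path is illustrative
#   tok2 = pickle.loads(pickle.dumps(tok))    # __setstate__ calls sp_model.Load again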
"""simple docstring"""
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 16,
"""per_device_eval_batch_size""": 16,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 5_00,
"""save_steps""": 55_00,
}
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name(self):
        return f"{self.framework}-transformers-test"
    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"
    @property
    def image_uri(self):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
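
# Quick, hedged check of how the PyTorch metric regexes above scrape values from
# a training log (the log line itself is made up):
import re

log_line = "train_runtime = 12.34"
match = re.search(r"train_runtime.*=\D*(.*?)$", log_line)
assert match is not None and match.group(1) == "12.34"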
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def lowerCAmelCase_ ( snake_case_ ):
with open(snake_case_,"""r""",encoding="""utf-8""",newline="""\n""" ) as f:
_A : str = f.readlines()
_A : Any = 0
while line_index < len(snake_case_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case_ ):
return None
# First grab the objects without a specific backend in _import_structure
_A : Optional[int] = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
_A : int = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case_ ):
_A : Optional[int] = _re_one_line_import_struct.search(snake_case_ ).groups()[0]
_A : Optional[Any] = re.findall(r"""\[([^\]]+)\]""",snake_case_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
_A : Union[str, Any] = _re_import_struct_key_value.search(snake_case_ )
if single_line_import_search is not None:
_A : int = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
_A : Union[str, Any] = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_A : List[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_A : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_A : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
_A : Dict = lines[line_index]
if _re_import_struct_add_one.search(snake_case_ ) is not None:
objects.append(_re_import_struct_add_one.search(snake_case_ ).groups()[0] )
elif _re_import_struct_add_many.search(snake_case_ ) is not None:
_A : int = _re_import_struct_add_many.search(snake_case_ ).groups()[0].split(""", """ )
_A : Any = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif _re_between_brackets.search(snake_case_ ) is not None:
_A : List[str] = _re_between_brackets.search(snake_case_ ).groups()[0].split(""", """ )
_A : Union[str, Any] = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif _re_quote_object.search(snake_case_ ) is not None:
objects.append(_re_quote_object.search(snake_case_ ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
_A : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_A : Dict = []
while (
line_index < len(snake_case_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
_A : Dict = lines[line_index]
_A : Tuple = _re_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
_A : Union[str, Any] = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case_ ):
# If the line is an if is_backend_available, we grab all objects associated.
_A : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_A : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_A : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
_A : Dict = lines[line_index]
_A : Any = _re_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
_A : Optional[int] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def lowerCAmelCase_ ( ):
_A : str = []
for root, _, files in os.walk(snake_case_ ):
if "__init__.py" in files:
_A : List[str] = os.path.join(snake_case_,"""__init__.py""" )
_A : Optional[int] = parse_init(snake_case_ )
if objects is not None:
_A : List[Any] = analyze_results(*snake_case_ )
if len(snake_case_ ) > 0:
_A : str = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(snake_case_ ) )
if len(snake_case_ ) > 0:
raise ValueError("""\n\n""".join(snake_case_ ) )
def lowerCAmelCase_ ( ):
_A : Any = []
for path, directories, files in os.walk(snake_case_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(snake_case_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(snake_case_ ) / folder).glob("""*.py""" ) ) ) == 0:
continue
_A : Union[str, Any] = str((Path(snake_case_ ) / folder).relative_to(snake_case_ ) )
_A : List[Any] = short_path.replace(os.path.sep,""".""" )
submodules.append(snake_case_ )
for fname in files:
if fname == "__init__.py":
continue
_A : Optional[int] = str((Path(snake_case_ ) / fname).relative_to(snake_case_ ) )
_A : str = short_path.replace(""".py""","""""" ).replace(os.path.sep,""".""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(snake_case_ )
return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def lowerCAmelCase_ ( ):
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
_A : List[Any] = direct_transformers_import(snake_case_ )
_A : Tuple = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-)add them.
with open(os.path.join(snake_case_,"""__init__.py""" ),"""r""" ) as f:
_A : Optional[int] = f.read()
import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""",snake_case_ ) ) )
_A : Dict = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(snake_case_ ) > 0:
_A : Dict = """\n""".join(f'''- {module}''' for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
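    # Hedged demo of find_backend on an illustrative init line:
    example_line = "    if not is_torch_available() and not is_vision_available():"
    assert find_backend(example_line) == "torch_and_vision"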
| 54 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
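
# Hedged sketch of the lazy-import idea used above; a simplified stand-in,
# not the actual transformers._LazyModule implementation:
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the submodule only when one of its names is first requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")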
| 54 | 1 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Elementwise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    """Sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
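
    # Quick numeric check (values rounded): sigmoid(0) = 0.5, so the GELU
    # approximation returns exactly 0 at x = 0.
    x = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(x))                      # ~[0.269, 0.5, 0.731]
    print(gaussian_error_linear_unit(x))   # ~[-0.154, 0.0, 0.846]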
| 399 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 399 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """A filesystem counts as remote when it is set and not the local "file" protocol."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
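
# Quick check of the URI helper above (paths are illustrative):
assert extract_path_from_uri("s3://bucket/dataset") == "bucket/dataset"
assert extract_path_from_uri("relative/path") == "relative/path"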
| 718 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
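
# Hedged usage sketch of the verbosity API exercised above (left as comments so
# importing this test module does not change global logging state):
#
#   from transformers.utils import logging as hf_logging
#   hf_logging.set_verbosity_error()    # silence warnings from all transformers.* loggers
#   assert hf_logging.get_verbosity() == hf_logging.log_levels["error"]
#   hf_logging.set_verbosity_warning()  # back to the default level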
| 677 | 0 |
"""simple docstring"""
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
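
    # Worked example: two annual cash flows of 100 at a 10% discount rate.
    # Year 0 contributes 100 / 1.1**0 = 100.00 and year 1 contributes 100 / 1.1**1 ≈ 90.91.
    assert present_value(0.10, [100, 100]) == 190.91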
| 473 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer:
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # greedy longest-match-first: shrink the window until a vocab hit
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
def __init__( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int]="<d>" , UpperCamelCase_ : Dict="</d>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : Tuple="</s>" , UpperCamelCase_ : Any="<pad>" , UpperCamelCase_ : Union[str, Any]="<unk>" , UpperCamelCase_ : Optional[Any]="</n>" , UpperCamelCase_ : str="</_>" , UpperCamelCase_ : str="left" , **UpperCamelCase_ : Dict , ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=UpperCamelCase_ , eod_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , line_token=UpperCamelCase_ , space_token=UpperCamelCase_ , padding_side=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCamelCase_ : List[str] = bod_token
lowerCamelCase_ : List[str] = eod_token
lowerCamelCase_ : Any = load_vocab(UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = self.encoder[space_token]
lowerCamelCase_ : Union[str, Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowerCamelCase_ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCamelCase_ : x[1] ) )
lowerCamelCase_ : Dict = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ : Tuple = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = []
for x in jieba.cut(UpperCamelCase_ , cut_all=UpperCamelCase_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCamelCase_ ) )
return output_tokens
def __UpperCamelCase ( self : Optional[int] , UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : str = [i for i in token_ids if i >= 0]
lowerCamelCase_ : Tuple = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[Any] , UpperCamelCase_ : Any ) -> int:
"""simple docstring"""
return token in self.encoder
def __UpperCamelCase ( self : Any , UpperCamelCase_ : List[str] ) -> str:
"""simple docstring"""
return "".join(UpperCamelCase_ )
def __UpperCamelCase ( self : Union[str, Any] , UpperCamelCase_ : List[str] ) -> Tuple:
"""simple docstring"""
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Optional[int] , UpperCamelCase_ : Any ) -> Dict:
"""simple docstring"""
return self.decoder.get(UpperCamelCase_ , self.unk_token )
def __UpperCamelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(UpperCamelCase_ ):
lowerCamelCase_ : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowerCamelCase_ : Optional[Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowerCamelCase_ : Optional[Any] = 0
if " " in self.encoder:
lowerCamelCase_ : Tuple = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowerCamelCase_ : List[str] = self.encoder['''\n''']
del self.encoder["\n"]
lowerCamelCase_ : List[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCamelCase_ : x[1] ) )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowerCamelCase_ : List[Any] = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __UpperCamelCase ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __UpperCamelCase ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ ))
return [1] + ([0] * len(UpperCamelCase_ ))
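
# Hedged demo of the greedy longest-match-first loop in WordpieceTokenizer.tokenize
# (the toy vocabulary is illustrative):
toy_vocab = {"un": 0, "aff": 1, "affable": 2}
wp = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
assert wp.tokenize("unaffable") == ["un", "affable"]
assert wp.tokenize("xyz") == ["<unk>", "<unk>", "<unk>"]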
| 501 | 0 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class lowerCamelCase__ :
'''simple docstring'''
def UpperCamelCase__ ( self ) -> Optional[int]:
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
A = self.tool.inputs
for _input in inputs:
if isinstance(_input ,lowerCamelCase_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
A = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
A = create_inputs(self.tool.inputs )
A = self.tool(*lowerCamelCase_ )
# There is a single output
if len(self.tool.outputs ) == 1:
A = [outputs]
self.assertListEqual(output_types(lowerCamelCase_ ) ,self.tool.outputs )
def UpperCamelCase__ ( self ) -> Tuple:
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def UpperCamelCase__ ( self ) -> int:
A = create_inputs(self.tool.inputs )
A = self.tool(*lowerCamelCase_ )
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
A = [outputs]
self.assertEqual(len(lowerCamelCase_ ) ,len(self.tool.outputs ) )
for output, output_type in zip(lowerCamelCase_ ,self.tool.outputs ):
A = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCamelCase_ ,lowerCamelCase_ ) )
def UpperCamelCase__ ( self ) -> Tuple:
A = create_inputs(self.tool.inputs )
A = []
for _input, input_type in zip(lowerCamelCase_ ,self.tool.inputs ):
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
A = self.tool(*lowerCamelCase_ )
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
A = [outputs]
self.assertEqual(len(lowerCamelCase_ ) ,len(self.tool.outputs ) )
| 255 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
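
# Worked example of maxpooling on a toy 4x4 matrix with a 2x2 window and stride 2
# (the matrix values are illustrative):
_demo = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
# windows: [[1,2],[5,6]] -> 6, [[3,4],[7,8]] -> 8, [[9,10],[13,14]] -> 14, [[11,12],[15,16]] -> 16
assert (maxpooling(_demo, size=2, stride=2) == np.array([[6, 8], [14, 16]])).all()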
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
UpperCAmelCase =Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 255 | 1 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"
def __init__( self : List[str] ,*A : Tuple ,**A : str ):
'''simple docstring'''
super().__init__(*A ,**A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __lowercase ( self : Optional[Any] ,A : List[Any]=None ,A : Any=None ,A : Optional[int]=None ,A : Union[str, Any]=None ,A : Optional[Any]=None ,A : Optional[Any]=None ,**A : str ,):
'''simple docstring'''
UpperCAmelCase__ : Any = {}
if truncation is not None:
UpperCAmelCase__ : Any = truncation
UpperCAmelCase__ : Union[str, Any] = generate_kwargs
UpperCAmelCase__ : Dict = {}
if return_tensors is not None and return_type is None:
UpperCAmelCase__ : Optional[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCAmelCase__ : Optional[int] = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase__ : Optional[int] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase__ : List[Any] = self.tokenizer.encode(A ,add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
UpperCAmelCase__ : Union[str, Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self : Dict ,A : int ,A : int ,A : int ):
'''simple docstring'''
return True
def __lowercase ( self : List[Any] ,*A : Dict ,A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] ,A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
UpperCAmelCase__ : Union[str, Any] = ([prefix + arg for arg in args[0]],)
UpperCAmelCase__ : int = True
elif isinstance(args[0] ,A ):
UpperCAmelCase__ : Optional[Any] = (prefix + args[0],)
UpperCAmelCase__ : List[Any] = False
else:
raise ValueError(
f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`" )
UpperCAmelCase__ : Dict = self.tokenizer(*A ,padding=A ,truncation=A ,return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Dict ,*A : str ,**A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = super().__call__(*A ,**A )
if (
isinstance(args[0] ,A )
and all(isinstance(A ,A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __lowercase ( self : Dict ,A : Any ,A : str=TruncationStrategy.DO_NOT_TRUNCATE ,**A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self._parse_and_tokenize(A ,truncation=A ,**A )
return inputs
def __lowercase ( self : Any ,A : List[str] ,**A : Optional[Any] ):
'''simple docstring'''
if self.framework == "pt":
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
UpperCAmelCase__ , UpperCAmelCase__ : Dict = tf.shape(model_inputs["""input_ids"""] ).numpy()
UpperCAmelCase__ : str = generate_kwargs.get("""min_length""" ,self.model.config.min_length )
UpperCAmelCase__ : str = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
self.check_inputs(A ,generate_kwargs["""min_length"""] ,generate_kwargs["""max_length"""] )
UpperCAmelCase__ : Dict = self.model.generate(**A ,**A )
UpperCAmelCase__ : Dict = output_ids.shape[0]
if self.framework == "pt":
UpperCAmelCase__ : List[str] = output_ids.reshape(A ,out_b // in_b ,*output_ids.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase__ : int = tf.reshape(A ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __lowercase ( self : List[Any] ,A : Any ,A : str=ReturnType.TEXT ,A : Any=False ):
'''simple docstring'''
UpperCAmelCase__ : Any = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCAmelCase__ : Optional[Any] = {f"{self.return_name}_token_ids": output_ids}
elif return_type == ReturnType.TEXT:
UpperCAmelCase__ : str = {
f"{self.return_name}_text": self.tokenizer.decode(
A ,skip_special_tokens=A ,clean_up_tokenization_spaces=A ,)
}
records.append(A )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"
def __call__( self : Optional[Any] ,*A : Any ,**A : Optional[Any] ):
'''simple docstring'''
return super().__call__(*A ,**A )
def __lowercase ( self : Union[str, Any] ,A : int ,A : int ,A : int ):
'''simple docstring'''
if max_length < min_length:
logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}." )
if input_length < max_length:
logger.warning(
f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})" )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"
def __lowercase ( self : List[str] ,A : int ,A : int ,A : int ):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def __lowercase ( self : int ,*A : Any ,A : Tuple=TruncationStrategy.DO_NOT_TRUNCATE ,A : str=None ,A : Optional[int]=None ):
'''simple docstring'''
if getattr(self.tokenizer ,"""_build_translation_inputs""" ,A ):
return self.tokenizer._build_translation_inputs(
*A ,return_tensors=self.framework ,truncation=A ,src_lang=A ,tgt_lang=A )
else:
return super()._parse_and_tokenize(*A ,truncation=A )
def __lowercase ( self : Union[str, Any] ,A : Dict=None ,A : Union[str, Any]=None ,**A : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = super()._sanitize_parameters(**A )
if src_lang is not None:
UpperCAmelCase__ : List[str] = src_lang
if tgt_lang is not None:
UpperCAmelCase__ : List[Any] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
UpperCAmelCase__ : Optional[Any] = kwargs.get("""task""" ,self.task )
UpperCAmelCase__ : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
UpperCAmelCase__ : str = items[1]
UpperCAmelCase__ : Union[str, Any] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] ,*A : int ,**A : Union[str, Any] ):
'''simple docstring'''
return super().__call__(*A ,**A )
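
# Hedged usage sketch of the pipelines above (left as comments; the first call
# downloads a default checkpoint, and outputs vary by model):
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   summarizer("Some long article text ...", max_length=20)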
| 65 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')

mock_tokenizer_config = {'target_lang': 'fi', 'source_lang': 'en'}
zh_code = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'

if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
@require_sentencepiece
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = MarianTokenizer
snake_case_ = False
snake_case_ = True
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase__ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
UpperCAmelCase__ : int = dict(zip(A ,range(len(A ) ) ) )
UpperCAmelCase__ : Optional[int] = Path(self.tmpdirname )
save_json(A ,save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(A ,save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(A ,save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(A ,save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
UpperCAmelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : List[Any] ,**A : List[Any] ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname ,**A )
def __lowercase ( self : Union[str, Any] ,A : Tuple ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = """</s>"""
UpperCAmelCase__ : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""</s>""" )
self.assertEqual(vocab_keys[1] ,"""<unk>""" )
self.assertEqual(vocab_keys[-1] ,"""<pad>""" )
self.assertEqual(len(A ) ,9 )
def __lowercase ( self : Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,9 )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" )
UpperCAmelCase__ : List[str] = en_de_tokenizer(["""I am a small frog"""] ,return_tensors=A )
self.assertIsInstance(A ,A )
UpperCAmelCase__ : str = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(A ,batch.input_ids[0] )
UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(A )
UpperCAmelCase__ : Tuple = [x.name for x in Path(A ).glob("""*""" )]
self.assertIn("""source.spm""" ,A )
MarianTokenizer.from_pretrained(A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Any = tok(
["""I am a small frog""" * 1_000, """I am a small frog"""] ,padding=A ,truncation=A ,return_tensors=A )
self.assertIsInstance(A ,A )
self.assertEqual(batch.input_ids.shape ,(2, 512) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] ,padding=A ,return_tensors=A )
self.assertIsInstance(A ,A )
self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
# fmt: off
UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A ,model_name="""Helsinki-NLP/opus-mt-en-de""" ,revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" ,decode_kwargs={"""use_source_tokenizer""": True} ,)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
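        # This checkpoint ships separate source- and target-side SentencePiece vocabularies;
        # the test checks that plain text and `text_target` are encoded with the right side each.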
        tokenizer = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
UpperCAmelCase__ : Any = """Tämä on testi"""
UpperCAmelCase__ : int = """This is a test"""
UpperCAmelCase__ : List[str] = [76, 7, 2_047, 2]
UpperCAmelCase__ : Optional[Any] = [69, 12, 11, 940, 2]
UpperCAmelCase__ : List[str] = tokenizer(A ).input_ids
self.assertListEqual(A ,A )
UpperCAmelCase__ : Optional[int] = tokenizer(text_target=A ).input_ids
self.assertListEqual(A ,A )
UpperCAmelCase__ : int = tokenizer.decode(A ,skip_special_tokens=A )
self.assertEqual(A ,A )
| 65 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
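# Standard lazy-import layout: `_import_structure` maps submodule names to their public symbols,
# and at runtime the module object is replaced by a `_LazyModule` that imports on first access.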
_import_structure = {
    '''configuration_mask2former''': [
        '''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''Mask2FormerConfig''',
    ],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''image_processing_mask2former'''] = ['''Mask2FormerImageProcessor''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_mask2former'''] = [
        '''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Mask2FormerForUniversalSegmentation''',
        '''Mask2FormerModel''',
        '''Mask2FormerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 714 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = '''hf-internal-testing/tiny-random-bert'''
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
FULL_COMMIT_HASH = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
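# The hub cache stores each repo under models--<org>--<name> with blobs/, refs/ and snapshots/
# subfolders; refs/main holds the commit hash that keys the snapshot directory.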
class GetFromCacheTests(unittest.TestCase ):
    def test_cached_file( self ):
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR , subfolder ) ) )
        with open(os.path.join(CACHE_DIR , 'refs' , 'main' ) ) as f:
            main_commit = f.read()
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , 'snapshots' , main_commit , CONFIG_NAME ) )
        self.assertTrue(os.path.isfile(archive_file ) )
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        self.assertEqual(archive_file , new_archive_file )
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME , revision='9b8c223' )
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , 'snapshots' , FULL_COMMIT_HASH , CONFIG_NAME ) )
    def test_cached_file_errors( self ):
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid model identifier' ):
            _ = cached_file('tiny-random-bert' , CONFIG_NAME )
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid git identifier' ):
            _ = cached_file(RANDOM_BERT , CONFIG_NAME , revision='aaaa' )
        with self.assertRaisesRegex(EnvironmentError , 'does not appear to have a file named' ):
            _ = cached_file(RANDOM_BERT , 'conf' )
    def test_non_existence_is_cached( self ):
        with self.assertRaisesRegex(EnvironmentError , 'does not appear to have a file named' ):
            _ = cached_file(RANDOM_BERT , 'conf' )
        with open(os.path.join(CACHE_DIR , 'refs' , 'main' ) ) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR , '.no_exist' , main_commit , 'conf' ) ) )
        path = cached_file(RANDOM_BERT , 'conf' , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        path = cached_file(RANDOM_BERT , 'conf' , local_files_only=True , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            path = cached_file(RANDOM_BERT , 'conf' , _raise_exceptions_for_connection_errors=False )
            self.assertIsNone(path )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_has_file( self ):
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , FLAX_WEIGHTS_NAME ) )
    def test_get_file_from_repo_distant( self ):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid model identifier' ):
            get_file_from_repo('bert-base-case' , CONFIG_NAME )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid git identifier' ):
            get_file_from_repo('bert-base-cased' , CONFIG_NAME , revision='ahaha' )
        resolved_file = get_file_from_repo('bert-base-cased' , CONFIG_NAME )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , 'r' ).read() )
        self.assertEqual(config['hidden_size'] , 768 )
    def test_get_file_from_repo_local( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / 'a.txt'
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir , 'a.txt' ) , str(filename ) )
            self.assertIsNone(get_file_from_repo(tmp_dir , 'b.txt' ) )
| 263 | 0 |
from __future__ import annotations
import time
import numpy as np
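# Test data for the Banker's algorithm: the claim vector is the total amount of each resource
# type, each allocation-table row is what a process currently holds, and each maximum-claim
# row is the most that process may ever request.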
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self ) -> list[int]:
        """Sum, per resource type, everything currently allocated across processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]
    def __available_resources( self ) -> list[int]:
        """Resources still free: total claim minus everything currently allocated."""
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )
    def __need( self ) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self ) -> dict[int, list[int]]:
        """Map each need row's original index to the row, so processes can be identified later."""
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self, **kwargs ) -> None:
        """Run the safety check, executing processes whose needs can currently be met."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing." )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x ) for x in available_resources] ) )
                    break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
    def __pretty_data( self ):
        """Print the allocation and maximum-claim tables plus current resource usage."""
        print(''' ''' * 9 + '''Allocated Resource Table''' )
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item ) + 1}"
                + ''' '''.join(f"{it:>8}" for it in item )
                + '''\n''' )
        print(''' ''' * 9 + '''System Resource Table''' )
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item ) + 1}"
                + ''' '''.join(f"{it:>8}" for it in item )
                + '''\n''' )
        print(
            '''Current Usage by Active Processes: '''
            + ''' '''.join(str(x ) for x in self.__claim_vector ) )
        print(
            '''Initial Available Resources: '''
            + ''' '''.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 5_2_4_2_8_8,
}
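# 524288 = 2**19: Reformer is designed for very long inputs, hence the large positional size.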
class ReformerTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size(self ) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab(self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize(self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self , token ):
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token(self , index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string(self , tokens ):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 486 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig ):
    model_type = "bert-generation"
    def __init__( self , vocab_size=5_0_3_5_8 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
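# A minimal usage sketch (assumes the `transformers` package; not part of this module):
#   from transformers import BertGenerationConfig
#   config = BertGenerationConfig()          # the defaults above
#   assert config.hidden_size == 1024 and config.num_hidden_layers == 24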
| 711 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request('GET' , 'https://huggingface.co' )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head('https://huggingface.co' )
| 434 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, '''src''', '''diffusers''')
class CheckDummiesTester(unittest.TestCase ):
    def test_find_backend( self ):
        simple_backend = find_backend(' if not is_torch_available():' )
        self.assertEqual(simple_backend,'torch' )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(' if not (is_torch_available() and is_transformers_available()):' )
        self.assertEqual(double_backend,'torch_and_transformers' )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
        self.assertEqual(triple_backend,'torch_and_transformers_and_onnx' )
    def test_read_init( self ):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch',objects )
        self.assertIn('torch_and_transformers',objects )
        self.assertIn('flax_and_transformers',objects )
        self.assertIn('torch_and_transformers_and_onnx',objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel',objects['torch'] )
        self.assertIn('FlaxUNet2DConditionModel',objects['flax'] )
        self.assertIn('StableDiffusionPipeline',objects['torch_and_transformers'] )
        self.assertIn('FlaxStableDiffusionPipeline',objects['flax_and_transformers'] )
        self.assertIn('LMSDiscreteScheduler',objects['torch_and_scipy'] )
        self.assertIn('OnnxStableDiffusionPipeline',objects['torch_and_transformers_and_onnx'] )
    def test_create_dummy_object( self ):
        dummy_constant = create_dummy_object('CONSTANT','\'torch\'' )
        self.assertEqual(dummy_constant,'\nCONSTANT = None\n' )
        dummy_function = create_dummy_object('function','\'torch\'' )
        self.assertEqual(
            dummy_function,'\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass','\'torch\'' )
        self.assertEqual(dummy_class,expected_dummy_class )
    def test_create_dummy_files( self ):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'],expected_dummy_pytorch_file )
| 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2SeqTrainerTester(TestCasePlus ):
@slow
@require_torch
    def test_finetune_bert2bert( self ):
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny','prajjwal1/bert-tiny' )
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset('cnn_dailymail','3.0.0',split='train[:1%]' )
        val_dataset = datasets.load_dataset('cnn_dailymail','3.0.0',split='validation[:1%]' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
def _map_to_encoder_decoder_inputs(A_: Dict ):
# Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'],padding='max_length',truncation=True,max_length=512 )
            outputs = tokenizer(batch['highlights'],padding='max_length',truncation=True,max_length=128 )
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            # -100 is the default ignore_index of the cross-entropy loss, so padded label
            # positions do not contribute to training.
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
def _compute_metrics(A_: str ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids,skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids,skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
# map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,batched=True,batch_size=batch_size,remove_columns=['article', 'highlights'],)
train_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
# same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,batched=True,batch_size=batch_size,remove_columns=['article', 'highlights'],)
val_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,per_device_train_batch_size=batch_size,per_device_eval_batch_size=batch_size,predict_with_generate=True,evaluation_strategy='steps',do_train=True,do_eval=True,warmup_steps=0,eval_steps=2,logging_steps=2,)
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bertabert,args=training_args,compute_metrics=_compute_metrics,train_dataset=train_dataset,eval_dataset=val_dataset,tokenizer=tokenizer,)
# start training
trainer.train()
| 1 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls , common , init_noise_sigma , timesteps ):
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput ):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state(self ):
        return True
    @register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , beta_start: float = 0.0_001 , beta_end: float = 0.02 , beta_schedule: str = "linear" , trained_betas: Optional[jnp.ndarray] = None , variance_type: str = "fixed_small" , clip_sample: bool = True , prediction_type: str = "epsilon" , dtype: jnp.dtype = jnp.float32 , ):
        self.dtype = dtype
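    # Flax schedulers are stateless: every mutable value lives in DDPMSchedulerState, which is
    # produced by `create_state` and threaded functionally through `set_timesteps` and `step`.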
    def create_state(self , common: Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input(
        self , state: DDPMSchedulerState , sample: jnp.ndarray , timestep: Optional[int] = None ) -> jnp.ndarray:
        return sample
    def set_timesteps(
        self , state: DDPMSchedulerState , num_inference_steps: int , shape: Tuple = () ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance(self , state: DDPMSchedulerState , t , predicted_variance=None , variance_type=None ):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # interpolate (in log space) between the smallest and largest allowed variance
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(
        self , state: DDPMSchedulerState , model_output: jnp.ndarray , timestep: int , sample: jnp.ndarray , key: Optional[jax.random.KeyArray] = None , return_dict: bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise(
        self , state: DDPMSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity(
        self , state: DDPMSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        return get_velocity_common(state.common , sample , noise , timesteps )
    def __len__( self ):
        return self.config.num_train_timesteps
| 343 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    def __init__( self , scheduler , optimizers , step_with_optimizer: bool = True , split_batches: bool = False ):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
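    # Only step the wrapped scheduler when the optimizer actually stepped, so the learning-rate
    # schedule stays aligned with optimization steps under gradient accumulation.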
    def step( self , *args , **kwargs ):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )
    def get_last_lr( self ):
        return self.scheduler.get_last_lr()
    def state_dict( self ):
        return self.scheduler.state_dict()
    def load_state_dict( self , state_dict ):
        self.scheduler.load_state_dict(state_dict )
    def get_lr( self ):
        return self.scheduler.get_lr()
    def print_lr( self , *args , **kwargs ):
        return self.scheduler.print_lr(*args , **kwargs )
| 343 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
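# Readers for three token-classification formats: CoNLL-style NER (label in the last column),
# chunking (label in the second-to-last column), and CoNLL-U part-of-speech tagging.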
class NER(TokenClassificationTask ):
    def __init__( self , label_idx=-1 ):
        self.label_idx = label_idx
    def read_examples_from_file( self , data_dir , mode )-> List[InputExample]:
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , F"{mode}.txt" )
        guid_index = 1
        examples = []
        with open(file_path , encoding="utf-8" ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" " )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace("\n" , "" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O" )
            if words:
                examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=words , labels=labels ) )
        return examples
    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(output_line )
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
    def get_labels( self , path: str )-> List[str]:
        if path:
            with open(path , "r" ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER ):
    def __init__( self ):
        super().__init__(label_idx=-2 )
    def get_labels( self , path: str )-> List[str]:
        if path:
            with open(path , "r" ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask ):
    def read_examples_from_file( self , data_dir , mode )-> List[InputExample]:
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , F"{mode}.txt" )
        guid_index = 1
        examples = []
        with open(file_path , encoding="utf-8" ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=words , labels=labels ) )
                    guid_index += 1
        return examples
    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
            out += "\n"
            writer.write(out )
            example_id += 1
if path:
with open(_UpperCAmelCase , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 628 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCamelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCamelCase = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric ):
    """Accuracy metric for the MATH dataset, with LaTeX canonicalization."""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute( self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 82 | 0 |
def interpolation_search(sorted_collection , item ):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        # Interpolation step: estimate the item's index by assuming the values are roughly
        # uniformly distributed between sorted_collection[left] and sorted_collection[right].
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
return None
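# Example: interpolation_search([1, 3, 5, 7, 9], 7) returns 3 (the index of 7),
# assuming an ascending list; absent items yield None.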
def interpolation_search_by_recursion(sorted_collection , item , left , right ):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    # mirror the window adjustment used in the iterative version above
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , point )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted(collection ):
    if collection != sorted(collection ):
        raise ValueError('''Collection must be ascending sorted''' )
    return True
if __name__ == "__main__":
import sys
    debug = 0
if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be ascending sorted to apply interpolation search')
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at positions: {result}''')
else:
print('Not found')
| 332 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs ):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = '''mock-s3-bucket'''
    dataset_path = f"""s3://{mock_bucket}"""
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith('''s3://''' ) is False
    dataset_path = '''./local/path'''
    new_dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs ):
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True
    fs = fsspec.filesystem('''file''' )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __lowercase )
def test_compression_filesystems(compression_fs_class , gz_file , bz2_file , lz4_file , zstd_file , xz_file , text_file ):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bz2_file, '''lz4''': lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"""for '{compression_fs_class.protocol}' compression protocol, """
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex('''.''' )]
    assert fs.glob('''*''' ) == [expected_filename]
    with fs.open(expected_filename , '''r''' , encoding='''utf-8''' ) as f, open(text_file , encoding='''utf-8''' ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def test_fs_isfile(protocol , zip_jsonl_path , jsonl_gz_path ):
    compressed_file_paths = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = '''dataset.jsonl'''
    path = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
    fs, *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def test_hf_filesystem(hf_token , hf_api , hf_private_dataset_repo_txt_data , text_file ):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
    assert hffs.isdir('''data''' )
    assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
    with open(text_file ) as f:
        assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def test_fs_overwrites():
    protocol = '''bz2'''
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
    )
)
| 332 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ctrl": 2_5_6,
}
CONTROL_CODES = {
"Pregnancy": 1_6_8_6_2_9,
"Christianity": 7_6_7_5,
"Explain": 1_0_6_4_2_3,
"Fitness": 6_3_4_4_0,
"Saving": 6_3_1_6_3,
"Ask": 2_7_1_7_1,
"Ass": 9_5_9_8_5,
"Joke": 1_6_3_5_0_9,
"Questions": 4_5_6_2_2,
"Thoughts": 4_9_6_0_5,
"Retail": 5_2_3_4_2,
"Feminism": 1_6_4_3_3_8,
"Writing": 1_1_9_9_2,
"Atheism": 1_9_2_2_6_3,
"Netflix": 4_8_6_1_6,
"Computing": 3_9_6_3_9,
"Opinion": 4_3_2_1_3,
"Alone": 4_4_9_6_7,
"Funny": 5_8_9_1_7,
"Gaming": 4_0_3_5_8,
"Human": 4_0_8_8,
"India": 1_3_3_1,
"Joker": 7_7_1_3_8,
"Diet": 3_6_2_0_6,
"Legal": 1_1_8_5_9,
"Norman": 4_9_3_9,
"Tip": 7_2_6_8_9,
"Weight": 5_2_3_4_3,
"Movies": 4_6_2_7_3,
"Running": 2_3_4_2_5,
"Science": 2_0_9_0,
"Horror": 3_7_7_9_3,
"Confession": 6_0_5_7_2,
"Finance": 1_2_2_5_0,
"Politics": 1_6_3_6_0,
"Scary": 1_9_1_9_8_5,
"Support": 1_2_6_5_4,
"Technologies": 3_2_5_1_6,
"Teenage": 6_6_1_6_0,
"Event": 3_2_7_6_9,
"Learned": 6_7_4_6_0,
"Notion": 1_8_2_7_7_0,
"Wikipedia": 3_7_5_8_3,
"Books": 6_6_6_5,
"Extract": 7_6_0_5_0,
"Confessions": 1_0_2_7_0_1,
"Conspiracy": 7_5_9_3_2,
"Links": 6_3_6_7_4,
"Narcissus": 1_5_0_4_2_5,
"Relationship": 5_4_7_6_6,
"Relationships": 1_3_4_7_9_6,
"Reviews": 4_1_6_7_1,
"News": 4_2_5_6,
"Translation": 2_6_8_2_0,
"multilingual": 1_2_8_4_0_6,
}
def get_pairs(word ):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
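# e.g. get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}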
class CTRLTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__( self , vocab_file , merges_file , unk_token="<unk>" , **kwargs ):
        super().__init__(unk_token=unk_token , **kwargs )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    @property
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(R'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 546 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__UpperCAmelCase =object()
# For specifying empty leaf dict `{}`
__UpperCAmelCase =object()
def _match(qs , ks ):
    """Return True if the regexes in qs match some window of the strings in tuple ks."""
    qts = tuple((re.compile(x + '''$''' ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules(rules ):
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions(in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
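# P('mp', None) shards the first weight axis across the model-parallel mesh axis and leaves the
# second replicated; a rule of None keeps the parameter fully replicated.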
| 546 | 1 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid , source , destination , allow_diagonal , ):
    """Shortest path on a binary grid (1 = free cell) with unit edge weights."""
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist , (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
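# Example sketch: dijkstra(np.array([[1, 1], [1, 1]]), (0, 0), (1, 1), allow_diagonal=True)
# returns (1.0, [(0, 0), (1, 1)]) -- one diagonal step from source to destination.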
if __name__ == "__main__":
import doctest
doctest.testmod()
| 544 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/swinv2-tiny-patch4-window8-256''': (
        '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
    ),
}
class Swinv2Config(PretrainedConfig ):
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
def __init__(self , __a=2_24 , __a=4 , __a=3 , __a=96 , __a=[2, 2, 6, 2] , __a=[3, 6, 12, 24] , __a=7 , __a=4.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=0.02 , __a=1e-5 , __a=32 , **__a , ) -> Optional[Any]:
super().__init__(**__a )
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = len(__a )
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase = int(embed_dim * 2 ** (len(__a ) - 1) )
UpperCamelCase = (0, 0, 0, 0)
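# A minimal usage sketch (standard transformers pattern; Swinv2Model is the matching
# model class in the same library):
#
#   from transformers import Swinv2Config, Swinv2Model
#   config = Swinv2Config()        # swinv2-tiny style defaults
#   model = Swinv2Model(config)    # randomly initialized weights
#   config.hidden_size             # 768 == 96 * 2 ** (4 - 1)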
| 544 | 1 |
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'

_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'

_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return the fraction of predictions equivalent to their references."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
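# A minimal usage sketch (mirrors the example embedded in _KWARGS_DESCRIPTION above):
#
#   metric = datasets.load_metric("competition_math")
#   results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#   print(results)  # {'accuracy': 1.0}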
| 376 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
# reduce TensorFlow's C++ log spam before it gets imported below
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
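# Usage sketch (assumption: in the transformers repo this script lives at utils/print_env.py):
#   python utils/print_env.py
# It prints whichever framework versions are importable and skips the rest.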
| 50 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 689 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 689 | 1 |