"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
return 10 - x * x
def lowercase ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if equation(snake_case__ ) * equation(snake_case__ ) >= 0:
raise ValueError('''Wrong space!''' )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(snake_case__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case__ ) * equation(snake_case__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
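# Usage sketch (added for illustration; not part of the original module). The loop
# above terminates once the bracketing interval is narrower than 0.01 and returns a
# point inside it, so the result lies within 0.01 of the true root sqrt(10) ~ 3.1623:
#
#   root = bisection(0, 6)
#   assert abs(root - 10 ** 0.5) < 0.01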
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( snake_case : float , snake_case : float , snake_case : float ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
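# Usage sketch (added for illustration; not part of the original module): pass 0 for
# the unknown quantity and the function solves V = I * R for it.
#
#   ohms_law(voltage=10, current=0, resistance=5)  # -> {'current': 2.0}
#   ohms_law(voltage=0, current=2, resistance=5)   # -> {'voltage': 10.0}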
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
UpperCamelCase__ = logging.getLogger(__name__)
def UpperCAmelCase ( snake_case : List[Any]=2 , snake_case : int=3 , snake_case : Optional[int]=16 , snake_case : int = 10 , snake_case : int = 2 ):
def get_dataset(snake_case : Any ):
_lowerCAmelCase:Union[str, Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_lowerCAmelCase:Tuple = get_dataset(snake_case )
_lowerCAmelCase:List[Any] = get_dataset(snake_case )
_lowerCAmelCase:Dict = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 )
_lowerCAmelCase:Any = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCAmelCase ( snake_case : List[Any] , snake_case : int , snake_case : Dict , snake_case : Dict , snake_case : int , snake_case : Tuple=None ):
_lowerCAmelCase:Optional[Any] = []
for epoch in range(snake_case ):
# Train quickly
model.train()
for batch in dataloader:
_lowerCAmelCase , _lowerCAmelCase:List[Any] = batch
_lowerCAmelCase:int = model(snake_case )
_lowerCAmelCase:Optional[int] = torch.nn.functional.mse_loss(snake_case , snake_case )
accelerator.backward(snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class a__ ( nn.Module ):
def __init__( self : List[Any]) -> Any:
"""simple docstring"""
super().__init__()
_lowerCAmelCase:str = nn.Parameter(torch.randn(1))
_lowerCAmelCase:Dict = nn.Parameter(torch.randn(1))
def __UpperCamelCase ( self : Tuple ,a__ : List[Any]) -> Tuple:
"""simple docstring"""
return x * self.a + self.b
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
_lowerCAmelCase:List[str] = DummyModel()
_lowerCAmelCase:Any = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = dummy_dataloaders()
_lowerCAmelCase:List[str] = ProjectConfiguration(total_limit=1 ,project_dir=a__ ,automatic_checkpoint_naming=a__)
# Train baseline
_lowerCAmelCase:List[Any] = Accelerator(project_config=a__)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:int = accelerator.prepare(
a__ ,a__ ,a__ ,a__)
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir)) ,1)
def __UpperCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
_lowerCAmelCase:int = DummyModel()
_lowerCAmelCase:Dict = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase , _lowerCAmelCase:Dict = dummy_dataloaders()
# Train baseline
_lowerCAmelCase:List[str] = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:Optional[Any] = accelerator.prepare(
a__ ,a__ ,a__ ,a__)
# Save initial
_lowerCAmelCase:Union[str, Any] = os.path.join(a__ ,'''initial''')
accelerator.save_state(a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):Any = model.a.item(), model.b.item()
_lowerCAmelCase:Optional[int] = optimizer.state_dict()
_lowerCAmelCase:Tuple = train(3 ,a__ ,a__ ,a__ ,a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):Optional[int] = model.a.item(), model.b.item()
_lowerCAmelCase:Any = optimizer.state_dict()
# Train partially
set_seed(42)
_lowerCAmelCase:str = DummyModel()
_lowerCAmelCase:Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase , _lowerCAmelCase:Any = dummy_dataloaders()
_lowerCAmelCase:Union[str, Any] = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:Tuple = accelerator.prepare(
a__ ,a__ ,a__ ,a__)
accelerator.load_state(a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):str = model.a.item(), model.b.item()
_lowerCAmelCase:Any = optimizer.state_dict()
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
_lowerCAmelCase:str = train(2 ,a__ ,a__ ,a__ ,a__)
# Save everything
_lowerCAmelCase:Optional[Any] = os.path.join(a__ ,'''checkpoint''')
accelerator.save_state(a__)
# Load everything back in and make sure all states work
accelerator.load_state(a__)
test_rands += train(1 ,a__ ,a__ ,a__ ,a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):List[Any] = model.a.item(), model.b.item()
_lowerCAmelCase:List[Any] = optimizer.state_dict()
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
def __UpperCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
_lowerCAmelCase:List[str] = DummyModel()
_lowerCAmelCase:int = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase , _lowerCAmelCase:Tuple = dummy_dataloaders()
_lowerCAmelCase:Any = ProjectConfiguration(automatic_checkpoint_naming=a__)
# Train baseline
_lowerCAmelCase:str = Accelerator(project_dir=a__ ,project_config=a__)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:List[Any] = accelerator.prepare(
a__ ,a__ ,a__ ,a__)
# Save initial
accelerator.save_state()
((_lowerCAmelCase) , (_lowerCAmelCase)):List[str] = model.a.item(), model.b.item()
_lowerCAmelCase:str = optimizer.state_dict()
_lowerCAmelCase:Optional[int] = train(3 ,a__ ,a__ ,a__ ,a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):int = model.a.item(), model.b.item()
_lowerCAmelCase:int = optimizer.state_dict()
# Train partially
set_seed(42)
_lowerCAmelCase:Optional[int] = DummyModel()
_lowerCAmelCase:Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = dummy_dataloaders()
_lowerCAmelCase:Union[str, Any] = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=a__)
_lowerCAmelCase:Dict = Accelerator(project_dir=a__ ,project_config=a__)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:Optional[int] = accelerator.prepare(
a__ ,a__ ,a__ ,a__)
accelerator.load_state(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_0'''))
((_lowerCAmelCase) , (_lowerCAmelCase)):Dict = model.a.item(), model.b.item()
_lowerCAmelCase:int = optimizer.state_dict()
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
_lowerCAmelCase:Dict = train(2 ,a__ ,a__ ,a__ ,a__)
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_1'''))
test_rands += train(1 ,a__ ,a__ ,a__ ,a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):Dict = model.a.item(), model.b.item()
_lowerCAmelCase:Union[str, Any] = optimizer.state_dict()
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
def __UpperCamelCase ( self : Dict) -> int:
"""simple docstring"""
_lowerCAmelCase:str = torch.tensor([1, 2, 3])
_lowerCAmelCase:Tuple = torch.tensor([2, 3, 4])
_lowerCAmelCase:Tuple = DummyModel()
_lowerCAmelCase:Union[str, Any] = torch.optim.Adam(net.parameters())
_lowerCAmelCase:Union[str, Any] = Accelerator()
with self.assertRaises(a__) as ve:
accelerator.register_for_checkpointing(a__ ,a__ ,a__ ,a__)
_lowerCAmelCase:List[Any] = str(ve.exception)
self.assertTrue('''Item at index 0''' in message)
self.assertTrue('''Item at index 1''' in message)
self.assertFalse('''Item at index 2''' in message)
self.assertFalse('''Item at index 3''' in message)
def __UpperCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
_lowerCAmelCase:int = DummyModel()
_lowerCAmelCase:str = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase:Union[str, Any] = torch.optim.lr_scheduler.StepLR(a__ ,step_size=1 ,gamma=0.99)
_lowerCAmelCase , _lowerCAmelCase:List[str] = dummy_dataloaders()
_lowerCAmelCase:Any = ProjectConfiguration(automatic_checkpoint_naming=a__)
# Train baseline
_lowerCAmelCase:Optional[int] = Accelerator(project_dir=a__ ,project_config=a__)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:int = accelerator.prepare(
a__ ,a__ ,a__ ,a__ ,a__)
# Save initial
accelerator.save_state()
_lowerCAmelCase:Dict = scheduler.state_dict()
train(3 ,a__ ,a__ ,a__ ,a__ ,a__)
self.assertNotEqual(a__ ,scheduler.state_dict())
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_0'''))
self.assertEqual(a__ ,scheduler.state_dict())
def __UpperCamelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
_lowerCAmelCase:str = DummyModel()
_lowerCAmelCase:Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=a__ ,total_limit=2)
# Train baseline
_lowerCAmelCase:List[str] = Accelerator(project_dir=a__ ,project_config=a__)
_lowerCAmelCase:List[str] = accelerator.prepare(a__)
# Save 3 states:
for _ in range(11):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_0''')))
self.assertTrue(os.path.exists(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_9''')))
self.assertTrue(os.path.exists(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_10''')))
@require_cuda
def __UpperCamelCase ( self : int) -> str:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
execute_subprocess_async(a__ ,env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase__ = '''/tmp/accelerate/state_checkpointing'''
UpperCamelCase__ = DummyModel()
UpperCamelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
UpperCamelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCamelCase__ , UpperCamelCase__ = dummy_dataloaders()
UpperCamelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCamelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCamelCase__ , UpperCamelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCamelCase__ = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
UpperCamelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
UpperCamelCase__ = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
UpperCamelCase__ = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    # apply each (tf_name -> hf_name) substitution in order
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        if any(k.endswith(ending) for ending in KEYS_TO_IGNORE):
            continue
        new_k = rename_state_dict_key(k, DECODER_PATTERNS)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T  # dense/attention kernels are stored transposed in TF
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        if any(k.endswith(ending) for ending in KEYS_TO_IGNORE):
            continue
        new_k = rename_state_dict_key(k, REMAINING_PATTERNS)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
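# Illustration (added; the TF variable name below is a made-up example, not taken from
# a real checkpoint): the patterns are applied left to right with str.replace, so a
# decoder self-attention query kernel is renamed step by step into the matching
# Hugging Face parameter name:
#
#   rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
#   # -> "model.decoder.layers.0.self_attn.q_proj.weight"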
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # fall back to hidden_size / 4 * hidden_size when not given explicitly
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
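# Usage sketch (added for illustration): with the fallbacks in __init__ above,
# attention_hidden_size defaults to hidden_size and intermediate_size to
# 4 * hidden_size when they are left as None:
#
#   config = RwkvConfig(hidden_size=768, num_hidden_layers=12)
#   config.attention_hidden_size  # 768
#   config.intermediate_size      # 3072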
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self):
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list with sentinel head and rear nodes."""

    def __init__(self):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node to the end of the list (before rear)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node; returns None if it is not linked."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache backed by a dict for lookup and a double linked list for recency."""

    # class variable to map the decorator functions to their respective instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self):
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Returns the value for `key` and marks it most recently used, or None."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets the value for `key`, evicting the least recently used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1

        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Decorator version of LRU Cache, keyed on the first positional argument."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
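# Usage sketch (added for illustration; `fib` is a hypothetical example function, not
# part of the original module): the decorator memoizes on the first positional argument
# and exposes the backing cache via the attached cache_info() helper.
#
#   @LRUCache.decorator(100)
#   def fib(n: int) -> int:
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(20)           # 6765
#   fib.cache_info()  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)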
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of the authenticated user using the given personal access token.
    """
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"{key}: {value}")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__magic_name__ = pd.read_csv("""sample_data.csv""", header=None)
__magic_name__ = df.shape[:1][0]
# If you're using some other dataset input the target column
__magic_name__ = df.iloc[:, 1:2]
__magic_name__ = actual_data.values.reshape(len_data, 1)
__magic_name__ = MinMaxScaler().fit_transform(actual_data)
__magic_name__ = 10
__magic_name__ = 5
__magic_name__ = 20
__magic_name__ = len_data - periods * look_back
__magic_name__ = actual_data[:division]
__magic_name__ = actual_data[division - look_back :]
__magic_name__ , __magic_name__ = [], []
__magic_name__ , __magic_name__ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__magic_name__ = np.array(train_x)
__magic_name__ = np.array(test_x)
__magic_name__ = np.array([list(i.ravel()) for i in train_y])
__magic_name__ = np.array([list(i.ravel()) for i in test_y])
__magic_name__ = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
__magic_name__ = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
__magic_name__ = model.predict(x_test)
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__magic_name__ = 3
def _A ( __lowercase ):
"""simple docstring"""
print("""Generating primitive root of p""" )
while True:
lowerCamelCase__ = random.randrange(3 , __lowercase )
if pow(__lowercase , 2 , __lowercase ) == 1:
continue
if pow(__lowercase , __lowercase , __lowercase ) == 1:
continue
return g
def _A ( __lowercase ):
"""simple docstring"""
print("""Generating prime p...""" )
lowerCamelCase__ = rabin_miller.generate_large_prime(__lowercase ) # select large prime number.
lowerCamelCase__ = primitive_root(__lowercase ) # one primitive root on modulo p.
lowerCamelCase__ = random.randrange(3 , __lowercase ) # private_key -> have to be greater than 2 for safety.
lowerCamelCase__ = cryptomath.find_mod_inverse(pow(__lowercase , __lowercase , __lowercase ) , __lowercase )
lowerCamelCase__ = (key_size, e_a, e_a, p)
lowerCamelCase__ = (key_size, d)
return public_key, private_key
def _A ( __lowercase , __lowercase ):
"""simple docstring"""
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
lowerCamelCase__ , lowerCamelCase__ = generate_key(__lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , """w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , """w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def _A ( ):
"""simple docstring"""
print("""Making key files...""" )
make_key_files("""elgamal""" , 2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
def solution(n: int = 1000000) -> int:
    """
    Returns the number below `n` that produces the longest Collatz sequence,
    memoizing chain lengths in `counters` to avoid recomputation.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embedded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase__ : List[str] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str=None , __UpperCAmelCase : str=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[Any]=None , ) -> Tuple:
if attention_mask is None:
SCREAMING_SNAKE_CASE_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
SCREAMING_SNAKE_CASE_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : int=13 , _lowerCAmelCase : str=7 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Dict=99 , _lowerCAmelCase : Tuple=16 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : int=4 , _lowerCAmelCase : int=4 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Tuple=32 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[int]=1 , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : Any=0.02 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = pad_token_id
SCREAMING_SNAKE_CASE_ = bos_token_id
SCREAMING_SNAKE_CASE_ = initializer_range
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
SCREAMING_SNAKE_CASE_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
SCREAMING_SNAKE_CASE_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = model_class_name(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.encode(inputs_dict['input_ids'] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
SCREAMING_SNAKE_CASE_ = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
SCREAMING_SNAKE_CASE_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model.decode(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = model_class_name(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.encode(inputs_dict['input_ids'] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
SCREAMING_SNAKE_CASE_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
SCREAMING_SNAKE_CASE_ = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = 99
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
SCREAMING_SNAKE_CASE_ = input_ids.shape[0]
SCREAMING_SNAKE_CASE_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_config_and_data()
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotForConditionalGeneration(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = lm_model(input_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotForConditionalGeneration(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
SCREAMING_SNAKE_CASE_ = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
SCREAMING_SNAKE_CASE_ = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = True
lowercase_ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotModelTester(self )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : str ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE_ = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
SCREAMING_SNAKE_CASE_ = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE_ = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
SCREAMING_SNAKE_CASE_ = np.ones((1, 1) ) * model.config.eos_token_id
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
SCREAMING_SNAKE_CASE_ = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
SCREAMING_SNAKE_CASE_ = ['Sam']
SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCAmelCase , return_tensors='jax' )
SCREAMING_SNAKE_CASE_ = model.generate(**_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'Sam is a great name. It means "sun" in Gaelic.'
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowerCAmelCase , **_lowerCAmelCase )
assert generated_txt[0].strip() == tgt_text | 31 |
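# The JIT tests above compare jitted and eager outputs of the same call. A
# minimal, self-contained sketch of that pattern (a toy pure function stands
# in for model.encode; nothing here depends on the Blenderbot tester):
import jax
import jax.numpy as jnp

@jax.jit
def encode(input_ids, attention_mask):
    # stand-in for model.encode: any pure function of the inputs works
    return (input_ids * attention_mask).sum(axis=-1)

sample_ids = jnp.ones((2, 8), dtype=jnp.int32)
sample_mask = jnp.ones((2, 8), dtype=jnp.int32)
jitted_out = encode(sample_ids, sample_mask)
with jax.disable_jit():
    eager_out = encode(sample_ids, sample_mask)
assert jitted_out.shape == eager_out.shape  # the same check the tests assert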
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> list[int]:
"""simple docstring"""
A = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
A , A = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
A = min(right_pointer - i + 1 , z_result[i - left_pointer] )
A = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
A , A = i, i + z_result[i] - 1
return z_result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
A = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 0 |
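# For readability, a de-obfuscated sketch of the Z-algorithm implemented
# above, with the same semantics (z[i] = longest common prefix of s and
# s[i:], z[0] left at 0) and the same pattern-counting trick:
def z_function(s: str) -> list:
    z = [0] * len(s)
    left, right = 0, 0  # rightmost known Z-box [left, right]
    for i in range(1, len(s)):
        if i <= right:  # reuse the overlap with the current Z-box
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:
            left, right = i, i + z[i] - 1
    return z

def count_occurrences(pattern: str, text: str) -> int:
    # any z-value >= len(pattern) in pattern+text marks a match start
    return sum(v >= len(pattern) for v in z_function(pattern + text))

assert z_function("aaab") == [0, 2, 1, 0]
assert count_occurrences("ab", "abcab") == 2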
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = "ylacombe/bark-small"
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = "en_speaker_1"
_UpperCamelCase = "This is a test string"
_UpperCamelCase = "speaker_embeddings_path.json"
_UpperCamelCase = "speaker_embeddings"
def lowercase ( self , **lowerCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowercase ( self ) -> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_UpperCamelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCamelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_UpperCamelCase = 35
_UpperCamelCase = 2
_UpperCamelCase = 8
_UpperCamelCase = {
"semantic_prompt": np.ones(lowerCamelCase_ ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_UpperCamelCase = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
_UpperCamelCase = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_UpperCamelCase = os.path.join(self.tmpdirname , "file.npz" )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
_UpperCamelCase = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
_UpperCamelCase = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_UpperCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = BarkProcessor(tokenizer=lowerCamelCase_ )
_UpperCamelCase = processor(text=self.input_string )
_UpperCamelCase = tokenizer(
self.input_string , padding="max_length" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 707 |
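# A hedged usage sketch of the processor behaviour the tests above exercise
# (checkpoint and preset names are taken from the test fixture; running this
# needs network access to the Hub):
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
# the preset is attached as semantic/coarse/fine prompt arrays
print(inputs["input_ids"].shape, sorted(inputs["history_prompt"].keys()))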
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCAmelCase = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
__lowerCAmelCase = cvtColor(img, COLOR_BGR2GRAY)
def _lowercase ( ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = cn.convert_to_negative(a__ )
# assert negative_img array for at least one True
assert negative_img.any()
def _lowercase ( ) -> Union[str, Any]:
"""simple docstring"""
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(a__ , 1_10 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowercase ( ) -> Any:
"""simple docstring"""
_UpperCamelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _lowercase ( ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_UpperCamelCase = canny.canny(a__ )
# assert canny array for at least one True
assert canny_array.any()
def _lowercase ( ) -> Tuple:
"""simple docstring"""
assert gg.gaussian_filter(a__ , 5 , sigma=0.9 ).all()
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_UpperCamelCase = conv.img_convolve(a__ , a__ ).astype(a__ )
assert res.any()
def _lowercase ( ) -> int:
"""simple docstring"""
assert med.median_filter(a__ , 3 ).any()
def _lowercase ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = sob.sobel_filter(a__ )
assert grad.any() and theta.any()
def _lowercase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = sp.make_sepia(a__ , 20 )
assert sepia.all()
def _lowercase ( a__ : str = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = bs.Burkes(imread(a__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def _lowercase ( a__ : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = rs.NearestNeighbour(imread(a__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def _lowercase ( ) -> Any:
"""simple docstring"""
_UpperCamelCase = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
_UpperCamelCase = imread(a__ , 0 )
# Test for get_neighbors_pixel function() return not None
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = image[x_coordinate][y_coordinate]
_UpperCamelCase = lbp.get_neighbors_pixel(
a__ , a__ , a__ , a__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_UpperCamelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_UpperCamelCase = lbp.local_binary_value(a__ , a__ , a__ )
assert lbp_image.any()
| 589 | 0 |
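# As a worked illustration of the LBP check above, a standalone computation
# of the 8-neighbour local binary pattern for one pixel (plain NumPy; this
# mirrors, but does not call, the package's lbp module):
import numpy as np

def lbp_value(image: np.ndarray, x: int, y: int) -> int:
    center = image[x, y]
    # clockwise neighbours from the top-left; each neighbour >= centre sets a bit
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1]:
            value |= int(image[nx, ny] >= center) << bit
    return value

tile = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]], dtype=np.uint8)
assert 0 <= lbp_value(tile, 1, 1) <= 255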
"""simple docstring"""
import string
def __snake_case ( _lowercase ):
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
UpperCamelCase = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCamelCase = string.ascii_uppercase.find(_lowercase )
UpperCamelCase = num - key
if num < 0:
UpperCamelCase = num + len(string.ascii_uppercase )
UpperCamelCase = translated + string.ascii_uppercase[num]
else:
UpperCamelCase = translated + symbol
print(f'Decryption using Key #{key}: {translated}' )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = input('''Encrypted message: ''' )
UpperCamelCase = message.upper()
decrypt(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 34 |
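# A compact, testable variant of the brute-force Caesar decryption above,
# returning all 26 candidates instead of printing them:
import string

def brute_force_caesar(message: str) -> dict:
    alphabet = string.ascii_uppercase
    candidates = {}
    for key in range(len(alphabet)):
        translated = ""
        for symbol in message.upper():
            if symbol in alphabet:
                translated += alphabet[(alphabet.index(symbol) - key) % 26]
            else:
                translated += symbol
        candidates[key] = translated
    return candidates

assert brute_force_caesar("KHOOR")[3] == "HELLO"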
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
        UpperCamelCase = F'--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py'.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
        UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py'.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 1 |
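# The core assertion in the script above is that distributed evaluation
# returns every sample exactly once and in order. A minimal sketch of that
# check in isolation (plain arrays stand in for EvalPrediction fields):
import numpy as np

def check_sequential(predictions: np.ndarray, label_ids: np.ndarray, n: int) -> dict:
    sequential = list(range(n))
    success = predictions.tolist() == sequential and label_ids.tolist() == sequential
    return {"success": success}

assert check_sequential(np.arange(7), np.arange(7), 7) == {"success": True}
assert check_sequential(np.arange(7)[::-1], np.arange(7), 7) == {"success": False}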
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCamelCase (_SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a = BertJapaneseTokenizer
a = False
a = True
def lowerCAmelCase_ ( self : Any ) -> Optional[Any]:
super().setUp()
SCREAMING_SNAKE_CASE__ = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCAmelCase_ ( self : Optional[Any] , _snake_case : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ = "こんにちは、世界。 \nこんばんは、世界。"
SCREAMING_SNAKE_CASE__ = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def lowerCAmelCase_ ( self : Tuple , _snake_case : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_input_output_texts(_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer.decode(_snake_case , clean_up_tokenization_spaces=_snake_case )
return text, ids
def lowerCAmelCase_ ( self : List[Any] ) -> int:
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Tuple ) -> str:
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Tuple ) -> List[str]:
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(_snake_case , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowerCAmelCase_ ( self : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(_snake_case )
SCREAMING_SNAKE_CASE__ = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(_snake_case , "wb" ) as handle:
pickle.dump(_snake_case , _snake_case )
with open(_snake_case , "rb" ) as handle:
SCREAMING_SNAKE_CASE__ = pickle.load(_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer_new.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def lowerCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def lowerCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
try:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def lowerCAmelCase_ ( self : Optional[Any] ) -> Any:
try:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def lowerCAmelCase_ ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(do_lower_case=_snake_case , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def lowerCAmelCase_ ( self : Dict ) -> str:
try:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(
do_lower_case=_snake_case , normalize_text=_snake_case , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def lowerCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = MecabTokenizer(normalize_text=_snake_case , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def lowerCAmelCase_ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(_snake_case )
SCREAMING_SNAKE_CASE__ = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(_snake_case , "wb" ) as handle:
pickle.dump(_snake_case , _snake_case )
with open(_snake_case , "rb" ) as handle:
SCREAMING_SNAKE_CASE__ = pickle.load(_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer_new.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
@require_sudachi
def lowerCAmelCase_ ( self : str ) -> str:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def lowerCAmelCase_ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def lowerCAmelCase_ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def lowerCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def lowerCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(do_lower_case=_snake_case , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def lowerCAmelCase_ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(normalize_text=_snake_case , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def lowerCAmelCase_ ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__ = SudachiTokenizer(trim_whitespace=_snake_case , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def lowerCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(_snake_case )
SCREAMING_SNAKE_CASE__ = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(_snake_case , "wb" ) as handle:
pickle.dump(_snake_case , _snake_case )
with open(_snake_case , "rb" ) as handle:
SCREAMING_SNAKE_CASE__ = pickle.load(_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer_new.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
@require_jumanpp
def lowerCAmelCase_ ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def lowerCAmelCase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = JumanppTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def lowerCAmelCase_ ( self : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = JumanppTokenizer(normalize_text=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def lowerCAmelCase_ ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = JumanppTokenizer(trim_whitespace=_snake_case )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def lowerCAmelCase_ ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def lowerCAmelCase_ ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
SCREAMING_SNAKE_CASE__ = {}
for i, token in enumerate(_snake_case ):
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = WordpieceTokenizer(vocab=_snake_case , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def lowerCAmelCase_ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
SCREAMING_SNAKE_CASE__ = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE__ = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(_snake_case , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
SCREAMING_SNAKE_CASE__ = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(_snake_case , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def lowerCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("ありがとう。" , add_special_tokens=_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("どういたしまして。" , add_special_tokens=_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase (_SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a = BertJapaneseTokenizer
a = False
def lowerCAmelCase_ ( self : Optional[Any] ) -> int:
super().setUp()
SCREAMING_SNAKE_CASE__ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCAmelCase_ ( self : Union[str, Any] , **_snake_case : str ) -> Optional[int]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **_snake_case )
def lowerCAmelCase_ ( self : str , _snake_case : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = "こんにちは、世界。 \nこんばんは、世界。"
SCREAMING_SNAKE_CASE__ = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def lowerCAmelCase_ ( self : str ) -> Any:
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Optional[int] ) -> Tuple:
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Dict ) -> List[str]:
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
_snake_case , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowerCAmelCase_ ( self : str ) -> List[str]:
SCREAMING_SNAKE_CASE__ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
SCREAMING_SNAKE_CASE__ = {}
for i, token in enumerate(_snake_case ):
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = CharacterTokenizer(vocab=_snake_case , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def lowerCAmelCase_ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("ありがとう。" , add_special_tokens=_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("どういたしまして。" , add_special_tokens=_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = "cl-tohoku/bert-base-japanese"
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertTokenizer.from_pretrained(_snake_case )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
SCREAMING_SNAKE_CASE__ = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(_snake_case )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 705 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 538 | 0 |
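# The file above is pure boilerplate for transformers' lazy loading. As a
# generic illustration of the idea (not the library's _LazyModule itself),
# a module-level __getattr__ (PEP 562) that imports submodules on demand;
# the package layout and symbol names here are hypothetical:
import importlib

_import_structure = {"modeling": ["VisionEncoderDecoderModel"]}

def __getattr__(name):
    for submodule, exports in _import_structure.items():
        if name in exports:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")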
from __future__ import annotations
lowerCAmelCase__ : Union[str, Any] =[
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def a__ ( A__, A__, A__, A__, A__, ):
SCREAMING_SNAKE_CASE_ : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(A__ ) )
] # the reference grid
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(A__ ) )
] # the action grid
SCREAMING_SNAKE_CASE_ : Optional[int] = init[0]
SCREAMING_SNAKE_CASE_ : List[str] = init[1]
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : List[Any] = g + heuristic[x][y] # cost from starting cell to destination cell
SCREAMING_SNAKE_CASE_ : List[str] = [[f, g, x, y]]
SCREAMING_SNAKE_CASE_ : Dict = False # flag that is set when search is complete
SCREAMING_SNAKE_CASE_ : Tuple = False # flag set if we can't find expand
while not found and not resign:
if len(A__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE_ : Any = cell.pop()
SCREAMING_SNAKE_CASE_ : Any = next_cell[2]
SCREAMING_SNAKE_CASE_ : str = next_cell[3]
SCREAMING_SNAKE_CASE_ : Tuple = next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE_ : Dict = True
else:
for i in range(len(A__ ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE_ : Optional[int] = x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE_ : List[Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(A__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE_ : List[Any] = g + cost
SCREAMING_SNAKE_CASE_ : List[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : int = i
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : List[str] = goal[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE_ : Any = x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE_ : int = y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE_ : int = xa
SCREAMING_SNAKE_CASE_ : List[Any] = ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE_ : List[Any] = []
for i in range(len(A__ ) ):
path.append(invpath[len(A__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
lowerCAmelCase__ : Union[str, Any] =[
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowerCAmelCase__ : Any =[0, 0]
# all coordinates are given in format [y,x]
lowerCAmelCase__ : List[str] =[len(grid) - 1, len(grid[0]) - 1]
lowerCAmelCase__ : str =1
# the cost map which pushes the path closer to the goal
lowerCAmelCase__ : Union[str, Any] =[[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowerCAmelCase__ : List[str] =abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowerCAmelCase__ : List[str] =99
lowerCAmelCase__ ,lowerCAmelCase__ : Any =search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 101 |
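# The search above re-sorts the whole open list on every iteration; a common
# refinement is a binary heap. A hedged sketch of the same expansion loop
# with heapq, returning only the path cost (same grid/goal/heuristic
# conventions as the demo; path reconstruction is omitted for brevity):
import heapq

def astar_cost(grid, init, goal, cost, heuristic):
    closed = {tuple(init)}
    open_heap = [(heuristic[init[0]][init[1]], 0, init[0], init[1])]  # (f, g, x, y)
    while open_heap:
        _, g, x, y = heapq.heappop(open_heap)
        if [x, y] == goal:
            return g
        for dx, dy in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
            nx, ny = x + dx, y + dy
            if (0 <= nx < len(grid) and 0 <= ny < len(grid[0])
                    and (nx, ny) not in closed and grid[nx][ny] == 0):
                closed.add((nx, ny))
                heapq.heappush(open_heap, (g + cost + heuristic[nx][ny], g + cost, nx, ny))
    raise ValueError("Algorithm is unable to find solution")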
def lowerCamelCase__ ( snake_case_ : Any ) -> List[Any]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def lowerCamelCase__ ( snake_case_ : dict[int, list[int]] ) -> list[tuple[int, int]]:
__snake_case = 0
__snake_case = len(snake_case_ ) # No of vertices in graph
__snake_case = [0] * n
__snake_case = [False] * n
def dfs(snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : List[Any] ):
__snake_case = True
__snake_case = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(snake_case_ , snake_case_ , snake_case_ , id_ )
__snake_case = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
__snake_case = min(low[at] , low[to] )
__snake_case = []
for i in range(snake_case_ ):
if not visited[i]:
dfs(snake_case_ , -1 , snake_case_ , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 592 | 0 |
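# A quick usage check of the bridge finder above on the first test graph,
# rewritten with descriptive names for clarity (same low-link logic):
def compute_bridges(graph):
    low = [0] * len(graph)
    visited = [False] * len(graph)
    bridges = set()

    def dfs(at, parent, depth):
        visited[at] = True
        low[at] = depth
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                dfs(to, at, depth + 1)
                low[at] = min(low[at], low[to])
                if depth < low[to]:  # no back edge climbs above `at`
                    bridges.add((min(at, to), max(at, to)))
            else:
                low[at] = min(low[at], low[to])

    for node in range(len(graph)):
        if not visited[node]:
            dfs(node, -1, 0)
    return bridges

triangle_with_tails = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3],
                       5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
assert compute_bridges(triangle_with_tails) == {(2, 3), (3, 4), (2, 5)}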
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__lowercase :List[str] = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[int]=None ):
'''simple docstring'''
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : Optional[int] = tensor_name.split("." )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(_lowerCamelCase , _lowerCamelCase )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
SCREAMING_SNAKE_CASE__ : Any = new_module
SCREAMING_SNAKE_CASE__ : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
SCREAMING_SNAKE_CASE__ : int = tensor_name in module._buffers
SCREAMING_SNAKE_CASE__ : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
if is_buffer or not is_bitsandbytes_available():
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : int = False
else:
        SCREAMING_SNAKE_CASE__ : Optional[Any] = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_8bit or is_4bit:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
SCREAMING_SNAKE_CASE__ : Dict = old_value.to(_lowerCamelCase )
elif isinstance(_lowerCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : int = value.to("cpu" )
                if value.dtype == torch.int8:
                    SCREAMING_SNAKE_CASE__ : List[Any] = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
                        "0.37.2" )
                    if not is_8bit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(_lowerCamelCase , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fpaa_statistics is None:
SCREAMING_SNAKE_CASE__ : Dict = new_value.T
SCREAMING_SNAKE_CASE__ : int = old_value.__dict__
            if is_8bit:
                SCREAMING_SNAKE_CASE__ : Optional[int] = bnb.nn.Int8Params(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase )
            elif is_4bit:
                SCREAMING_SNAKE_CASE__ : List[str] = bnb.nn.Params4bit(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(_lowerCamelCase ) )
else:
if value is None:
SCREAMING_SNAKE_CASE__ : List[str] = old_value.to(_lowerCamelCase )
elif isinstance(_lowerCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = value.to(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(_lowerCamelCase , device=_lowerCamelCase )
if is_buffer:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_value
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(_lowerCamelCase , requires_grad=old_value.requires_grad )
SCREAMING_SNAKE_CASE__ : Dict = new_value
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Union[str, Any]=False ):
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
current_key_name.append(_lowerCamelCase )
if (isinstance(_lowerCamelCase , nn.Linear ) or isinstance(_lowerCamelCase , _lowerCamelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(_lowerCamelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : List[str] = module.weight.shape
else:
SCREAMING_SNAKE_CASE__ : Any = module.in_features
SCREAMING_SNAKE_CASE__ : int = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        SCREAMING_SNAKE_CASE__ : str = bnb.nn.Linear8bitLt(
                            _lowerCamelCase , _lowerCamelCase , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
SCREAMING_SNAKE_CASE__ : Tuple = True
else:
if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                            SCREAMING_SNAKE_CASE__ : Optional[Any] = bnb.nn.Linear4bit(
                                _lowerCamelCase , _lowerCamelCase , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
SCREAMING_SNAKE_CASE__ : str = True
# Store the module class in case we need to transpose the weight later
SCREAMING_SNAKE_CASE__ : Tuple = type(_lowerCamelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowerCamelCase )
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ : List[str] = _replace_with_bnb_linear(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , has_been_replaced=_lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None , _lowerCamelCase : int=None , _lowerCamelCase : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
SCREAMING_SNAKE_CASE__ : Any = _replace_with_bnb_linear(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def UpperCAmelCase ( *_lowerCamelCase : Dict , **_lowerCamelCase : List[Any] ):
'''simple docstring'''
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , _lowerCamelCase , )
return replace_with_bnb_linear(*_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase ( *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Optional[int] ):
'''simple docstring'''
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , _lowerCamelCase , )
return set_module_quantized_tensor_to_device(*_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = deepcopy(_lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
SCREAMING_SNAKE_CASE__ : List[str] = find_tied_parameters(_lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = sum(_lowerCamelCase , [] )
SCREAMING_SNAKE_CASE__ : int = len(_lowerCamelCase ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = not hasattr(_lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : Optional[int] = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : int = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : int = set(_lowerCamelCase ) - set(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = list(set(_lowerCamelCase ) ) + list(_lowerCamelCase )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Dict = [".weight", ".bias"]
SCREAMING_SNAKE_CASE__ : str = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Any = name.replace(_lowerCamelCase , "" )
filtered_module_names.append(_lowerCamelCase )
return filtered_module_names | 714 |
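# For orientation, the replacement pass above is what quantized loading runs
# under the hood. A hedged end-to-end usage sketch (needs a CUDA device with
# bitsandbytes and accelerate installed; the checkpoint name is illustrative):
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m", quantization_config=bnb_config, device_map="auto"
)
# eligible nn.Linear layers are now bnb.nn.Linear8bitLt; lm_head stays in
# full precision via the default modules_to_not_convert list
print(type(model.model.decoder.layers[0].self_attn.q_proj))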
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
            _lowerCamelCase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
            _lowerCamelCase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
            _lowerCamelCase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main() | 26 | 0 |
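# The dataset plumbing above hinges on tf.data.Dataset.from_generator with
# explicit dtypes and shapes. A minimal standalone sketch of that pattern
# (a toy generator stands in for the tokenized examples):
import tensorflow as tf

def gen():
    for i in range(3):
        yield {"input_ids": [i, i + 1, i + 2]}, i % 2  # (features, label)

ds = tf.data.Dataset.from_generator(
    gen,
    ({"input_ids": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)
ds = ds.apply(tf.data.experimental.assert_cardinality(3))
for features, label in ds:
    print(features["input_ids"].numpy(), int(label))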
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
    '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
    '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
    '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
    '''funnel-transformer/intermediate''': (
        '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
    ),
    '''funnel-transformer/intermediate-base''': (
        '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
    ),
    '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
    '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
    '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
    '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig ):
    """Configuration class to store the configuration of a Funnel Transformer model."""
    model_type = '''funnel'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
    }
    def __init__( self , vocab_size=3_0_5_2_2 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=7_6_8 , n_head=1_2 , d_head=6_4 , d_inner=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1E-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs )
    @property
    def num_hidden_layers(self ) -> int:
        return sum(self.block_sizes )
    @num_hidden_layers.setter
    def num_hidden_layers(self , value ):
        raise NotImplementedError(
            """This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
    @property
    def num_blocks(self ) -> int:
        return len(self.block_sizes )
    @num_blocks.setter
    def num_blocks(self , value ):
        raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
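# A quick self-check of the derived properties above; a minimal sketch assuming
# `transformers` is installed (PretrainedConfig is imported from it):
if __name__ == "__main__":
    _cfg = FunnelConfig(block_sizes=[2, 2])
    assert _cfg.num_hidden_layers == 4 and _cfg.num_blocks == 2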
| 58 |
def is_palindrome(head ):
    """Check whether a singly linked list is a palindrome in O(1) extra space."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head ):
    """Check whether a singly linked list is a palindrome using a stack of the second half."""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head ):
    """Check whether a singly linked list is a palindrome by hashing each value's positions."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
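# A small demonstration of the three checkers above. `ListNode` and `build` are
# hypothetical helpers, since this snippet does not define a node class; note
# that is_palindrome() splits its input in place, so each call gets a fresh list.
if __name__ == "__main__":
    class ListNode:
        def __init__(self, val, nxt=None):
            self.val, self.next = val, nxt

    def build(values):
        head = None
        for v in reversed(values):
            head = ListNode(v, head)
        return head

    for checker in (is_palindrome, is_palindrome_stack, is_palindrome_dict):
        assert checker(build([1, 2, 2, 1])) and not checker(build([1, 2, 3]))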
| 351 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
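# Usage sketch for the auto classes above: in `transformers` they are re-exported
# at the package root and dispatch on a checkpoint's config type. Shown as
# comments because instantiating them downloads weights:
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
#   classifier = FlaxAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")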
| 47 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num ):
    """Compute the gamma function by integrating x^(num - 1) * e^(-x) from 0 to infinity."""
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num) )[0]
def integrand(x , z ):
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
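    # Sanity check, assuming the definitions above: gamma(n) equals (n - 1)!,
    # so gamma(5) should print roughly 24.0 (up to quadrature error).
    print(gamma(5))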
| 47 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    """Mimics a download manager by resolving URLs to files inside a local dummy-data archive (used in dataset tests)."""
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__( self , dataset_name : str , config : str , version : Union[Version, str] , cache_dir : Optional[str] = None , use_local_dummy_data : bool = False , load_existing_dummy_data : bool = True , download_callbacks : Optional[List[Callable]] = None , ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks : List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self ):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder(self ):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('''dummy''' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('''dummy''' , self.version_name )
    @property
    def dummy_zip_file(self ):
        return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
    def download_dummy_data(self ):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )
    @property
    def local_path_to_dummy_data(self ):
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
    @property
    def github_path_to_dummy_data(self ):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
        return self._bucket_url
    @property
    def manual_dir(self ):
        # return the full path if it is a directory
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
    def download_and_extract(self , data_url , *args ):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )
    def download(self , data_url , *args ):
        return self.download_and_extract(data_url )
    def download_custom(self , data_url , custom_download ):
        return self.download_and_extract(data_url )
    def extract(self , path , *args , **kwargs ):
        return path
    def get_recorded_sizes_checksums(self ):
        return {}
    def create_dummy_data_dict(self , path_to_dummy_data , data_url ):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self , path_to_dummy_data , data_url ):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list
    def create_dummy_data_single(self , path_to_dummy_data , data_url ):
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self ):
        pass
    def manage_extracted_files(self ):
        pass
    def iter_archive(self , path ):
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )
        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob('''*''' )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
                yield file_path.relative_to(path ).as_posix(), file_path.open('''rb''' )
    def iter_files(self , paths ):
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith(('''.''', '''__''') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith(('''.''', '''__''') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith(('''.''', '''__''') ):
                            continue
                        yield os.path.join(dirpath , filename )
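# A usage sketch for the mock manager above; the dataset name, version, and the
# on-disk dummy_data.zip layout are hypothetical:
# dl_manager = MockDownloadManager("squad", config=None, version="1.0.0", use_local_dummy_data=True)
# dl_manager.download_and_extract("https://example.com/train.json")
#   -> resolves to a path inside the local dummy_data.zip instead of downloading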
| 688 |
import inspect
import unittest
class DependencyTester(unittest.TestCase ):
    def test_diffusers_import(self ):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False
    def test_backend_registration(self ):
        import diffusers
        from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, F'{backend} is not in the deps table!'
| 688 | 1 |
from __future__ import annotations
class XORCipher:
    """Simple symmetric XOR cipher over strings and text files."""
    def __init__( self , key = 0 ):
        # private field
        self.__key = key
    def encrypt(self , content , key ):
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt(self , content , key ):
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string(self , content , key = 0 ):
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string(self , content , key = 0 ):
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file(self , file , key = 0 ):
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('encrypt.out' , 'w+' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True
    def decrypt_file(self , file , key ):
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('decrypt.out' , 'w+' ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 700 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model ,config ,tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() ,pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
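    # Example invocation; all three paths are placeholders:
    #   python convert_bert_original_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path ./bert_model.ckpt \
    #     --bert_config_file ./bert_config.json \
    #     --pytorch_dump_path ./pytorch_model.bin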
| 124 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase ):
    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
            'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file, 'w', encoding='utf-8' ) as fp:
            json.dump(image_processor_map, fp )
    def get_tokenizer(self, **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_rust_tokenizer(self, **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs )
    def get_image_processor(self, **kwargs ):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs )
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False )
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor )
    def test_save_load_pretrained_additional_features(self ):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0 )
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor )
    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np' )
        input_processor = processor(images=image_input, return_tensors='np' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
    def test_tokenizer(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key] )
    def test_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input )
        self.assertListEqual(list(inputs.keys() ), ['input_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_processor, decoded_tok )
    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input )
        self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
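# The suite above is a standard unittest case; a typical invocation, assuming the
# file is named test_processor_clip.py (hypothetical here):
#   python -m pytest test_processor_clip.py -k CLIPProcessorTest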
| 107 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig ):
    """Configuration class to store the configuration of a MobileNetV1 model."""
    model_type = "mobilenet_v1"
    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.9_99 , initializer_range=0.02 , layer_norm_eps=0.0_01 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})] )
    @property
    def outputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
    @property
    def atol_for_validation(self ) -> float:
        return 1e-4
| 624 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args ):
    """Converts a GPTSAN TensorFlow checkpoint into a PyTorch state dict and saves it to `args.output`."""
    parameter_file = os.path.join(args.tf_model_dir , '''parameters.json''' )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." )
    if not args.output.endswith('''.pt''' ):
        args.output = args.output + '''.pt'''
    new_state = OrderedDict()
    with tf.device('''/CPU:0''' ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float16 )
            if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
                continue
            if key_name.startswith('''pasts/''' ):
                if key_name.startswith('''pasts/mlp''' ):
                    player = int(key_name[9] )
                elif key_name.startswith('''pasts/out''' ):
                    player = 8
                name = '''model.sqout.%d.weight''' % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/moe''' ):
                player = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/switch_gating/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/softmlp/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
                    nlayer = key_name[-9:-7]
                    for i in range(16 ):
                        name = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0] ).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/mlp''' ):
                player = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/p1/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/p1/bias''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/p2/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/p2/bias''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/ln''' ):
                player = int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    name = '''model.blocks.%d.feed_forward.norm.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/g''' ):
                    name = '''model.blocks.%d.feed_forward.norm.weight''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/att''' ):
                player = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/qkv/kernel''' ):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
                    new_state[name] = torch.tensor(state_q )
                    name = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
                    new_state[name] = torch.tensor(state_k )
                    name = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
                    new_state[name] = torch.tensor(state_v )
                elif key_name.endswith('''/o/kernel''' ):
                    name = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/an''' ):
                player = int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    name = '''model.blocks.%d.self_attn.norm.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/g''' ):
                    name = '''model.blocks.%d.self_attn.norm.weight''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif (
                key_name.startswith('''model/wte''' )
                or key_name.startswith('''model/wpe''' )
                or key_name.startswith('''model/ete''' )
            ):
                nlayer = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
                    key_name[-3:]
                ]
                name = '''model.%s.weight''' % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state )
                if key_name.startswith('''model/wte''' ):
                    name = '''lm_head.weight'''
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/wob''' ):
                name = '''final_logits_bias'''
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1) )
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense/kernel":
                name = '''model.last_project.weight'''
                state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense_1/bias":
                name = '''model.last_project.bias'''
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state )
    torch.save(new_state , args.output )
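# Example invocation of the converter above; paths are placeholders and the flag
# names match the parser defined below:
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan_tf --output ./gptsan.pt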
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 721 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Ter(datasets.Metric ):
    def _info(self ):
        if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
            raise ImportWarning(
                '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
                '''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
                } ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
                '''https://github.com/jhclark/tercom''',
            ] , )
    def _compute(self , predictions , references , normalized = False , ignore_punct = False , support_zh_ja_chars = False , case_sensitive = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length} | 55 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin ):
    """Wraps a Whisper feature extractor and a Whisper tokenizer into a single processor."""
    feature_extractor_class = '''WhisperFeatureExtractor'''
    tokenizer_class = '''WhisperTokenizer'''
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    def get_prompt_ids( self , text , return_tensors="np" ):
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
| 554 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __a ( _lowerCAmelCase ):
@slow
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Dict:
"""simple docstring"""
UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
UpperCamelCase = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCamelCase = bertabert.config.encoder.vocab_size
UpperCamelCase = tokenizer.sep_token_id
UpperCamelCase = tokenizer.cls_token_id
UpperCamelCase = 128
UpperCamelCase = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
UpperCamelCase = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
UpperCamelCase = train_dataset.select(range(32 ) )
UpperCamelCase = val_dataset.select(range(16 ) )
UpperCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : Any ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCamelCase = tokenizer(batch["article"] , padding="max_length" , truncation=UpperCAmelCase_ , max_length=512 )
UpperCamelCase = tokenizer(batch["highlights"] , padding="max_length" , truncation=UpperCAmelCase_ , max_length=128 )
UpperCamelCase = inputs.input_ids
UpperCamelCase = inputs.attention_mask
UpperCamelCase = outputs.input_ids
UpperCamelCase = outputs.input_ids.copy()
UpperCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
UpperCamelCase = outputs.attention_mask
assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCAmelCase_ : Any ):
UpperCamelCase = pred.label_ids
UpperCamelCase = pred.predictions
# all unnecessary tokens are removed
UpperCamelCase = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
UpperCamelCase = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ )
return {"accuracy": accuracy}
# map train dataset
UpperCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
UpperCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="steps" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCamelCase = SeqaSeqTrainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
# start training
trainer.train()
| 554 | 1 |
'''simple docstring'''
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
'''simple docstring'''
assert masked_input.count("<mask>" ) == 1
a_ =torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1
a_ =model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple
a_ =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
a_ =logits[0, masked_index, :]
a_ =logits.softmax(dim=0 )
a_ , a_ =prob.topk(k=lowercase__ , dim=0 )
a_ =" ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowercase__ ) )] )
a_ =tokenizer.mask_token
a_ =[]
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
a_ =predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(lowercase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(lowercase__ ) , lowercase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowercase__ , lowercase__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41 | 0 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_snake_case = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(snake_case_ ),version.parse(snake_case_ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = f'''\n{hint}''' if hint is not None else """"""
# non-versioned check
if re.match(r"""^[\w_\-\d]+$""",snake_case_ ):
_A , _A , _A : Optional[int] = requirement, None, None
else:
_A : Any = re.findall(r"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""",snake_case_ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
f''' got {requirement}''' )
_A , _A : str = match[0]
_A : Any = want_full.split(""",""" ) # there could be multiple requirements
_A : List[Any] = {}
for w in want_range:
_A : List[str] = re.findall(r"""^([\s!=<>]{1,2})(.+)""",snake_case_ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
f''' but got {requirement}''' )
_A , _A : Any = match[0]
_A : List[Any] = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
_A : Union[str, Any] = """.""".join([str(snake_case_ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ )
return
# check if any version is installed
try:
_A : Union[str, Any] = importlib.metadata.version(snake_case_ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(snake_case_,snake_case_ )
| 307 |
from __future__ import annotations
import bisect
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0,snake_case_ = -1 ):
if hi < 0:
_A : Optional[Any] = len(snake_case_ )
while lo < hi:
_A : Any = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
_A : List[str] = mid + 1
else:
_A : Optional[int] = mid
return lo
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0,snake_case_ = -1 ):
if hi < 0:
_A : Any = len(snake_case_ )
while lo < hi:
_A : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
_A : Tuple = mid + 1
else:
_A : Optional[Any] = mid
return lo
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0,snake_case_ = -1 ):
sorted_collection.insert(bisect_left(snake_case_,snake_case_,snake_case_,snake_case_ ),snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0,snake_case_ = -1 ):
sorted_collection.insert(bisect_right(snake_case_,snake_case_,snake_case_,snake_case_ ),snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[Any] = 0
_A : str = len(snake_case_ ) - 1
while left <= right:
_A : Dict = left + (right - left) // 2
_A : Dict = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
_A : int = midpoint - 1
else:
_A : int = midpoint + 1
return None
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : str = bisect.bisect_left(snake_case_,snake_case_ )
if index != len(snake_case_ ) and sorted_collection[index] == item:
return index
return None
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
if right < left:
return None
_A : Dict = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(snake_case_,snake_case_,snake_case_,midpoint - 1 )
else:
return binary_search_by_recursion(snake_case_,snake_case_,midpoint + 1,snake_case_ )
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by comma:\n").strip()
_snake_case = sorted(int(item) for item in user_input.split(","))
_snake_case = int(input("Enter a single number to be found in the list:\n"))
_snake_case = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 307 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : int =logging.get_logger(__name__)
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
lowerCAmelCase_ : str = to_pil_image(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = pil_image.size
lowerCAmelCase_ : Any = pytesseract.image_to_data(lowerCAmelCase_ , lang=lowerCAmelCase_ , output_type='''dict''' , config=lowerCAmelCase_ )
lowerCAmelCase_ : Dict = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowerCAmelCase_ : Dict = [idx for idx, word in enumerate(lowerCAmelCase_ ) if not word.strip()]
lowerCAmelCase_ : int = [word for idx, word in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
lowerCAmelCase_ : Tuple = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
lowerCAmelCase_ : List[Any] = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
lowerCAmelCase_ : Optional[int] = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
lowerCAmelCase_ : int = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowerCAmelCase_ : str = []
for x, y, w, h in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase_ : str = [x, y, x + w, y + h]
actual_boxes.append(lowerCAmelCase_ )
# finally, normalize the bounding boxes
lowerCAmelCase_ : Optional[Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ) -> None:
super().__init__(**__lowercase )
lowerCAmelCase_ : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowerCAmelCase_ : Any = get_size_dict(__lowercase )
lowerCAmelCase_ : Dict = do_resize
lowerCAmelCase_ : Optional[Any] = size
lowerCAmelCase_ : Optional[Any] = resample
lowerCAmelCase_ : Optional[int] = do_rescale
lowerCAmelCase_ : Union[str, Any] = rescale_value
lowerCAmelCase_ : List[Any] = do_normalize
lowerCAmelCase_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowerCAmelCase_ : Tuple = apply_ocr
lowerCAmelCase_ : Dict = ocr_lang
lowerCAmelCase_ : Union[str, Any] = tesseract_config
def lowercase_ ( self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ) -> np.ndarray:
lowerCAmelCase_ : Union[str, Any] = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowerCAmelCase_ : List[str] = (size['''height'''], size['''width'''])
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
lowerCAmelCase_ : List[Any] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : List[str] = size if size is not None else self.size
lowerCAmelCase_ : str = get_size_dict(__lowercase )
lowerCAmelCase_ : str = resample if resample is not None else self.resample
lowerCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : Optional[int] = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCAmelCase_ : Dict = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCAmelCase_ : int = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCAmelCase_ : int = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : List[Any] = [to_numpy_array(__lowercase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Any = []
for image in images:
lowerCAmelCase_ : List[str] = apply_tesseract(__lowercase , __lowercase , __lowercase )
words_batch.append(__lowercase )
boxes_batch.append(__lowercase )
if do_resize:
lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_rescale:
lowerCAmelCase_ : List[Any] = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
lowerCAmelCase_ : List[str] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
lowerCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
lowerCAmelCase_ : Union[str, Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase )
if apply_ocr:
lowerCAmelCase_ : str = words_batch
lowerCAmelCase_ : str = boxes_batch
return data | 720 |
from math import sqrt
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowerCAmelCase_ : str = True
# 0 and 1 are none primes.
if number <= 1:
lowerCAmelCase_ : List[Any] = False
for divisor in range(2 , int(round(sqrt(lowerCAmelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCAmelCase_ : Any = False
break
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'status' must been from type bool"
return status
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase_ : Optional[Any] = list(range(2 , n + 1 ) )
lowerCAmelCase_ : List[Any] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase_ : Tuple = 0
# filters actual prime numbers.
lowerCAmelCase_ : List[str] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase_ : List[str] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase_ ):
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowerCAmelCase_ : Union[str, Any] = [] # this list will be returns of the function.
# potential prime number factors.
lowerCAmelCase_ : Any = 2
lowerCAmelCase_ : List[str] = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase_ ):
while quotient != 1:
if is_prime(lowerCAmelCase_ ) and (quotient % factor == 0):
ans.append(lowerCAmelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase_ : Union[str, Any] = 0
# prime factorization of 'number'
lowerCAmelCase_ : Optional[int] = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : Dict = max(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase_ : Union[str, Any] = 0
# prime factorization of 'number'
lowerCAmelCase_ : int = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = min(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Dict:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCAmelCase_ ), "compare bust been from type bool"
return number % 2 == 0
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCAmelCase_ ), "compare bust been from type bool"
return number % 2 != 0
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (number > 2) and is_even(lowerCAmelCase_ )
), "'number' must been an int, even and > 2"
lowerCAmelCase_ : Union[str, Any] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase_ : List[Any] = get_prime_numbers(lowerCAmelCase_ )
lowerCAmelCase_ : Any = len(lowerCAmelCase_ )
# run variable for while-loops.
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : List[Any] = None
# exit variable. for break up the loops
lowerCAmelCase_ : int = True
while i < len_pn and loop:
lowerCAmelCase_ : Tuple = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase_ : Union[str, Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (len(lowerCAmelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase_ : List[str] = 0
while numbera != 0:
lowerCAmelCase_ : int = numbera % numbera
lowerCAmelCase_ : Union[str, Any] = numbera
lowerCAmelCase_ : Tuple = rest
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase_ : Dict = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase_ : Tuple = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : str = prime_factorization(lowerCAmelCase_ )
elif numbera == 1 or numbera == 1:
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : str = max(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : Any = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase_ : str = prime_fac_a.count(lowerCAmelCase_ )
lowerCAmelCase_ : List[str] = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(max(lowerCAmelCase_ , lowerCAmelCase_ ) ):
ans *= n
else:
lowerCAmelCase_ : Dict = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase_ : Optional[int] = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Dict:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : Optional[int] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCAmelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and is_prime(
lowerCAmelCase_ ), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
assert (
is_prime(lowerCAmelCase_ ) and is_prime(lowerCAmelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCAmelCase_ : str = p_number_a + 1 # jump to the next number
lowerCAmelCase_ : Dict = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowerCAmelCase_ : List[str] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCAmelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase_ : Tuple = get_divisors(lowerCAmelCase_ )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase_ : List[str] = gcd(abs(lowerCAmelCase_ ) , abs(lowerCAmelCase_ ) )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowerCAmelCase_ : Tuple = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : Optional[Any] = 1
lowerCAmelCase_ : Tuple = 1 # this will be return
for _ in range(n - 1 ):
lowerCAmelCase_ : Any = ans
ans += fiba
lowerCAmelCase_ : Dict = tmp
return ans | 619 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase__ ( _lowerCamelCase , unittest.TestCase ):
A_ : int = VideoToVideoSDPipeline
A_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
A_ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
A_ : Tuple = PipelineTesterMixin.required_optional_params - {'latents'}
A_ : Dict = False
# No `output_type`.
A_ : str = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
A = CLIPTextModel(__UpperCamelCase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=0 ) -> Optional[int]:
# 3 frames
A = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith('mps' ):
A = torch.manual_seed(__UpperCamelCase )
else:
A = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __UpperCamelCase ( self : Optional[int] ) -> str:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = VideoToVideoSDPipeline(**__UpperCamelCase )
A = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs(__UpperCamelCase )
A = 'np'
A = sd_pipe(**__UpperCamelCase ).frames
A = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
A = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCamelCase ( self : int ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCamelCase , expected_max_diff=5e-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCamelCase ( self : str ) -> Any:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
pass
def __UpperCamelCase ( self : Any ) -> Any:
return super().test_progress_bar()
@slow
@skip_mps
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
A = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
A = torch.Generator(device='cpu' ).manual_seed(0 )
A = torch.randn((1, 10, 3, 1_024, 576) , generator=__UpperCamelCase )
A = video.to('cuda' )
A = 'Spiderman is surfing'
A = pipe(__UpperCamelCase , video=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=3 , output_type='pt' ).frames
A = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2 | 106 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 0 |
'''simple docstring'''
def A_ ( SCREAMING_SNAKE_CASE_ = 1_00 ) ->int:
lowercase_ = set()
lowercase_ = 0
lowercase_ = n + 1 # maximum limit
for a in range(2 , SCREAMING_SNAKE_CASE_ ):
for b in range(2 , SCREAMING_SNAKE_CASE_ ):
lowercase_ = a**b # calculates the current power
collect_powers.add(SCREAMING_SNAKE_CASE_ ) # adds the result to the set
return len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 603 | '''simple docstring'''
def A_ ( SCREAMING_SNAKE_CASE_ ) ->float:
return 10 - x * x
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->float:
# Bolzano theory in order to find if there is a root between a and b
if equation(SCREAMING_SNAKE_CASE_ ) * equation(SCREAMING_SNAKE_CASE_ ) >= 0:
raise ValueError("""Wrong space!""" )
lowercase_ = a
while (b - a) >= 0.01:
# Find middle point
lowercase_ = (a + b) / 2
# Check if middle point is root
if equation(SCREAMING_SNAKE_CASE_ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(SCREAMING_SNAKE_CASE_ ) * equation(SCREAMING_SNAKE_CASE_ ) < 0:
lowercase_ = c
else:
lowercase_ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 603 | 1 |
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowercase = yaml.safe_load(
'''\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'''
)
lowercase = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
lowercase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowercase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowercase = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
lowercase = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowercase = (
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
lowercase = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowercase = (
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
lowercase = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowercase = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
lowercase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowercase = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
lowercase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
lowercase = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
lowercase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
lowercase = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
lowercase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
lowercase = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
lowercase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowercase = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
lowercase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
lowercase = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
lowercase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowercase = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
lowercase = ''
lowercase = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
lowercase = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowercase = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def UpperCAmelCase ( A : int , A : int ):
'''simple docstring'''
assert ReadMe.from_string(__A , __A ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def UpperCAmelCase ( A : Tuple , A : int ):
'''simple docstring'''
with pytest.raises(__A , match=re.escape(expected_error.format(path='root' ) ) ):
_UpperCAmelCase = ReadMe.from_string(__A , __A )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase ( A : int , A : Any ):
'''simple docstring'''
with pytest.raises(__A , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(__A , __A )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase ( A : List[str] ):
'''simple docstring'''
ReadMe.from_string(__A , __A , suppress_parsing_errors=__A )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def UpperCAmelCase ( A : str , A : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = Path(__A ) / 'README.md'
with open(__A , 'w+' ) as readme_file:
readme_file.write(__A )
_UpperCAmelCase = ReadMe.from_readme(__A , __A ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def UpperCAmelCase ( A : str , A : List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = Path(__A ) / 'README.md'
with open(__A , 'w+' ) as readme_file:
readme_file.write(__A )
_UpperCAmelCase = expected_error.format(path=__A )
with pytest.raises(__A , match=re.escape(__A ) ):
_UpperCAmelCase = ReadMe.from_readme(__A , __A )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase ( A : Dict , A : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = Path(__A ) / 'README.md'
with open(__A , 'w+' ) as readme_file:
readme_file.write(__A )
_UpperCAmelCase = expected_error.format(path=__A )
with pytest.raises(__A , match=re.escape(__A ) ):
ReadMe.from_readme(__A , __A )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase ( A : List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = Path(__A ) / 'README.md'
with open(__A , 'w+' ) as readme_file:
readme_file.write(__A )
ReadMe.from_readme(__A , __A , suppress_parsing_errors=__A )
| 573 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def __snake_case ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=__A , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=__A , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=__A , help='where to store parsed gold_data_path file' , )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
SCREAMING_SNAKE_CASE : Tuple = json.load(__A )
for dpr_record in tqdm(__A ):
SCREAMING_SNAKE_CASE : Tuple = dpr_record['question']
SCREAMING_SNAKE_CASE : Optional[int] = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(__A ) + '\n' )
if __name__ == "__main__":
main()
| 265 | 0 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
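
# Unit tests covering the BERT tokenizer family: the slow Python implementation
# (BertTokenizer), the Rust-backed fast implementation (BertTokenizerFast), and
# the BasicTokenizer / WordpieceTokenizer building blocks they are composed of.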
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
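
    # Write a tiny WordPiece vocabulary into the mixin-provided temp directory so
    # tokenizers can be built locally without downloading a pretrained checkpoint.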
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
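
    # Input/expected-output pair consumed by the shared round-trip tests
    # defined in TokenizerTesterMixin.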
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
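
    # End-to-end check on the slow tokenizer: raw text -> WordPiece tokens -> ids.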
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
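
    # The slow (Python) and fast (Rust) tokenizers must agree on tokens and ids,
    # with and without special tokens, and with lower casing enabled.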
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
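
# A standalone sketch of the greedy longest-match-first rule that the
# WordPiece test above exercises (pure Python; the helper name is ours, and
# this simplified version ignores max_input_chars_per_word):
def wordpiece_greedy(word, vocab):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = ("##" if start > 0 else "") + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return ["[UNK]"]  # no sub-piece matched: the whole word becomes [UNK]
        start = end
    return pieces


assert wordpiece_greedy("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert wordpiece_greedy("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]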
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
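
# The same guarded-import pattern, sketched for a hypothetical consumer
# module; `MyPipeline` is a placeholder name, not diffusers API. The idea is
# that importing the package never fails outright: a missing soft dependency
# swaps the real class for a dummy that raises a helpful error when used.
try:
    import torch  # noqa: F401

    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:
    class MyPipeline:
        """Real implementation, defined only when torch imports cleanly."""
else:
    class MyPipeline:
        def __init__(self, *args, **kwargs):
            raise ImportError("MyPipeline requires the `torch` package to be installed.")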
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
_KWARGS_DESCRIPTION = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da'''))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
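
# A small self-contained sketch of the dict-of-lists -> list-of-dicts
# reshaping used in `_compute` above (pure Python, no COMET required):
_data = {"src": ["a", "b"], "mt": ["A", "B"], "ref": ["x", "y"]}
_rows = [dict(zip(_data, t)) for t in zip(*_data.values())]
assert _rows == [{"src": "a", "mt": "A", "ref": "x"}, {"src": "b", "mt": "B", "ref": "y"}]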
"""simple docstring"""
def UpperCAmelCase_ ( __a : list ):
'''simple docstring'''
if len(__a ) <= 1:
return lst
_lowerCamelCase : str = 1
while i < len(__a ):
if lst[i - 1] <= lst[i]:
i += 1
else:
_lowerCamelCase , _lowerCamelCase : Optional[Any] = lst[i], lst[i - 1]
i -= 1
if i == 0:
_lowerCamelCase : Dict = 1
return lst
if __name__ == "__main__":
a_ = input("""Enter numbers separated by a comma:\n""").strip()
a_ = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
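
    # Quick sanity check (gnome sort mutates its argument and also returns it):
    example = [3, 1, 2, 2]
    assert gnome_sort(example) == [1, 2, 2, 3]
    assert example == [1, 2, 2, 3]  # sorted in place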
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = 'Hello, World!'
SAMPLE_LANGUAGE = 'en_XX'
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path('''data_bin''')
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name='''xmod_base''',
        arch='''xmod_base''',
        task='''multilingual_masked_lm''',
        data_name_or_path=str(data_dir),
        bpe='''sentencepiece''',
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / '''sentencepiece.bpe.model'''),
        src_dict=str(data_dir / '''dict.txt'''),
    )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, '''bottleneck''', 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''', config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''')

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''')
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''')
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''')
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError('''Lists of language adapters do not match.''')
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['''mnli'''].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['''mnli'''].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['''mnli'''].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads['''mnli'''](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f'max_absolute_diff = {max_absolute_diff}')  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print('''Do both models output the same tensors?''', '''🔥''' if success else '''💩''')
    if not success:
        raise Exception('''Something went wRoNg''')

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
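
    # Hypothetical invocation (the paths are placeholders, not shipped
    # artifacts); the script expects the fairseq checkpoint directory to also
    # contain sentencepiece.bpe.model next to the .pt file:
    #
    #   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
    #       --xmod_checkpoint_path ./xmod_base/model.pt \
    #       --pytorch_dump_folder_path ./converted-xmod-base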
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
      title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
      author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
            and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
            Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
            Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
            Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
            and Jeffrey Dean},
      year={2016},
      eprint={1609.08144},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token'''), id='''sequence'''),
                    '''references''': datasets.Sequence(
                        datasets.Sequence(datasets.Value('''string''', id='''token'''), id='''sequence'''), id='''references'''
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
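
# Direct use of the NLTK scorer this metric wraps (a minimal sketch; inputs
# must already be tokenized, and references sit one nesting level deeper than
# hypotheses):
_hypotheses = [["the", "cat", "sat"]]
_list_of_references = [[["the", "cat", "sat", "down"]]]
_score = gleu_score.corpus_gleu(
    list_of_references=_list_of_references, hypotheses=_hypotheses, min_len=1, max_len=4
)
print(round(_score, 2))  # a float between 0 (no n-gram matches) and 1 (all match)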
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
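
# Minimal usage sketch outside the test harness (shape matches the
# integration test above; silent audio makes the values illustrative only):
_audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
_features = ASTFeatureExtractor()(_audio, sampling_rate=16_000, return_tensors="np").input_values
print(_features.shape)  # (1, 1024, 128): 1024 time frames x 128 mel bins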
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks whether the given settings can be used by the enigma function."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f'Please use 3 unique rotors (not {unique_rotsel})'
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f'First rotor position is not within range of 1..26 ({rotorpos1})'
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f'Second rotor position is not within range of 1..26 ({rotorpos2})'
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f'Third rotor position is not within range of 1..26 ({rotorpos3})'
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Builds the symmetric plugboard dictionary from a string of letter pairs."""
    if not isinstance(pbstring, str):
        msg = f'Plugboard setting isn\'t type string ({type(pbstring)})'
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f'Odd number of symbols ({len(pbstring)})'
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f'\'{i}\' not in list of symbols'
            raise Exception(msg)
        elif i in tmppbl:
            msg = f'Duplicate symbol ({i})'
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Runs one pass of the machine; a second pass with the same settings decrypts."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print('Encrypted message:', en)
    print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
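
    # Round-trip property sketch: with identical settings the machine is its
    # own inverse (the reflector makes the substitution self-reciprocal), so
    # a second pass restores the plaintext (uppercased).
    ciphertext = enigma("HELLO", (1, 1, 1), (rotor1, rotor2, rotor3), "")
    assert enigma(ciphertext, (1, 1, 1), (rotor1, rotor2, rotor3), "") == "HELLO"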
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS', '{}')
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS', '{}')
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get('sagemaker_mpi_enabled', False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed') is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.',
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info('PyTorch: setting up devices')
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch')
        if self.no_cuda:
            device = torch.device('cpu')
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda', local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend='smddp', timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK'))
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl', timeout=self.ddp_timeout_delta)
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
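
# A sketch of the environment-variable contract the detection function above
# relies on (values illustrative; on a real SageMaker model-parallel job the
# launcher sets these for you):
os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
# With both set, is_sagemaker_model_parallel_available() reduces to whether
# the `smdistributed` package is importable in the current environment.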
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
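
# Shape sketch for the two record types above (field values illustrative):
#   DPRSpanPrediction(span_score=9.2, relevance_score=3.1, doc_id=0,
#                     start_index=12, end_index=15, text="thomas jefferson")
#   DPRReaderOutput(start_logits=..., end_logits=..., relevance_logits=...)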
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(a_ )
class a :
def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
if titles is None and texts is None:
return super().__call__(
_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
elif titles is None or texts is None:
lowercase = titles if texts is None else texts
return super().__call__(
_lowerCamelCase , _lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
lowercase = titles if not isinstance(_lowerCamelCase , _lowerCamelCase ) else [titles]
lowercase = texts if not isinstance(_lowerCamelCase , _lowerCamelCase ) else [texts]
lowercase = len(_lowerCamelCase )
lowercase = questions if not isinstance(_lowerCamelCase , _lowerCamelCase ) else [questions] * n_passages
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
                F'There should be as many titles as texts but got {len(_lowerCamelCase )} titles and {len(_lowerCamelCase )} texts.' )
lowercase = super().__call__(_lowerCamelCase , _lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase )['input_ids']
lowercase = super().__call__(_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase )['input_ids']
lowercase = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowerCamelCase , _lowerCamelCase )
]
}
if return_attention_mask is not False:
lowercase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase = attention_mask
return self.pad(_lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors=_lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1_6 , _lowerCamelCase = 6_4 , _lowerCamelCase = 4 , ):
lowercase = reader_input['input_ids']
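        # assumed layout (mirrors the upstream DPR reader): reader_output = (start_logits, end_logits, relevance_logits, ...)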
lowercase , lowercase , lowercase = reader_output[:3]
lowercase = len(_lowerCamelCase )
lowercase = sorted(range(_lowerCamelCase ) , reverse=_lowerCamelCase , key=relevance_logits.__getitem__ )
lowercase = []
for doc_id in sorted_docs:
lowercase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase = sequence_ids.index(self.pad_token_id )
else:
lowercase = len(_lowerCamelCase )
lowercase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowerCamelCase , top_spans=_lowerCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowerCamelCase , start_index=_lowerCamelCase , end_index=_lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowercase = []
for start_index, start_score in enumerate(_lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowercase = sorted(_lowerCamelCase , key=lambda x : x[1] , reverse=_lowerCamelCase )
lowercase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
lowercase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a_ )
class a ( a_, a_ ):
UpperCAmelCase_ : Union[str, Any] =VOCAB_FILES_NAMES
UpperCAmelCase_ : Union[str, Any] =READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : List[Any] =READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ : Optional[int] =["input_ids", "attention_mask"]
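# A hedged usage sketch for the reader tokenizer above: it mirrors the upstream
# DPRReaderTokenizer, so the class/checkpoint names and the inputs below are
# illustrative assumptions, not taken from this file.
from transformers import DPRReader, DPRReaderTokenizer

_tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
_model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
_encoded = _tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway."],
    return_tensors="pt",
)
_outputs = _model(**_encoded)
_best_spans = _tokenizer.decode_best_spans(_encoded, _outputs)  # ranked answer spans per passage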
| 134 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a ( _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE__ : List[str] = '''BlipImageProcessor'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[Any] = False
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.image_processor
def __call__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = True , _lowerCAmelCase = None , **_lowerCAmelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.tokenizer
__SCREAMING_SNAKE_CASE: Any = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
return text_encoding
# add pixel_values
__SCREAMING_SNAKE_CASE: Tuple = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase )
if text is not None:
__SCREAMING_SNAKE_CASE: Dict = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
else:
__SCREAMING_SNAKE_CASE: Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(_lowerCAmelCase )
return encoding_image_processor
def snake_case_ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.tokenizer.model_input_names
__SCREAMING_SNAKE_CASE: Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
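# A hedged usage sketch: the class above mirrors the upstream BlipProcessor, so
# the checkpoint name and the image path below are illustrative assumptions.
from PIL import Image
from transformers import BlipProcessor

_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
_image = Image.open("photo.jpg").convert("RGB")  # placeholder path
_inputs = _processor(images=_image, text="a photography of", return_tensors="pt")
# _inputs now carries pixel_values plus the tokenized text prompt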
| 202 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "M-CLIP"
def __init__( self : Tuple , _lowerCAmelCase : List[str]=1_024 , _lowerCAmelCase : str=768 , **_lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = transformerDimSize
SCREAMING_SNAKE_CASE_ = imageDimSize
super().__init__(**_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = MCLIPConfig
def __init__( self : Dict , _lowerCAmelCase : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : str ):
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.transformer(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
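        # masked mean pooling: average token embeddings over positions where attention_mask == 1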
SCREAMING_SNAKE_CASE_ = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(_lowerCAmelCase ), embs | 31 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a__ ( UpperCamelCase_ ):
snake_case__ = ['''image_processor''', '''tokenizer''']
snake_case__ = '''CLIPImageProcessor'''
snake_case__ = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self : Optional[int] ,a__ : List[str]=None ,a__ : List[str]=None ,**a__ : str) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:str = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,a__ ,)
_lowerCAmelCase:Optional[int] = kwargs.pop('''feature_extractor''')
_lowerCAmelCase:int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(a__ ,a__)
def __call__( self : Optional[Any] ,a__ : List[Any]=None ,a__ : Any=None ,a__ : Any=None ,**a__ : List[Any]) -> Any:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''')
if text is not None:
_lowerCAmelCase:List[Any] = self.tokenizer(a__ ,return_tensors=a__ ,**a__)
if images is not None:
_lowerCAmelCase:int = self.image_processor(a__ ,return_tensors=a__ ,**a__)
if text is not None and images is not None:
_lowerCAmelCase:List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__) ,tensor_type=a__)
def __UpperCamelCase ( self : Optional[int] ,*a__ : int ,**a__ : str) -> List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a__ ,**a__)
def __UpperCamelCase ( self : Dict ,*a__ : Union[str, Any] ,**a__ : List[Any]) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*a__ ,**a__)
@property
def __UpperCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = self.tokenizer.model_input_names
_lowerCAmelCase:Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
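# A hedged usage sketch: a CLIPImageProcessor paired with an XLM-RoBERTa tokenizer
# matches the upstream AltCLIPProcessor, so the class/checkpoint names and the
# image path below are illustrative assumptions.
from PIL import Image
from transformers import AltCLIPProcessor

_processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
_image = Image.open("cat.png")  # placeholder path
_inputs = _processor(text=["a photo of a cat"], images=_image, padding=True, return_tensors="pt")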
| 439 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 439 | 1 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ) -> List[Any]:
# ===== initialization =====
__lowerCamelCase : Dict = Mock()
__lowerCamelCase : Union[str, Any] = conn, Mock()
__lowerCamelCase : Dict = iter([1, None] )
    __lowerCamelCase : Tuple = lambda _ : next(f )  # f: the iterator created above (name follows the upstream test); read() yields 1, then None
# ===== invoke =====
send_file(filename='mytext.txt' , testing=UpperCAmelCase_ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 13 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ ={
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None ):
# Initialise PyTorch model
_SCREAMING_SNAKE_CASE : Dict = XLNetConfig.from_json_file(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
_SCREAMING_SNAKE_CASE : Tuple = finetuning_task
_SCREAMING_SNAKE_CASE : List[Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
_SCREAMING_SNAKE_CASE : Optional[int] = XLNetForSequenceClassification(__lowerCamelCase )
elif "squad" in finetuning_task:
_SCREAMING_SNAKE_CASE : Union[str, Any] = finetuning_task
_SCREAMING_SNAKE_CASE : Tuple = XLNetForQuestionAnswering(__lowerCamelCase )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = XLNetLMHeadModel(__lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Save pytorch-model
_SCREAMING_SNAKE_CASE : int = os.path.join(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = os.path.join(__lowerCamelCase, __lowerCamelCase )
print(f"""Save PyTorch model to {os.path.abspath(__lowerCamelCase )}""" )
torch.save(model.state_dict(), __lowerCamelCase )
print(f"""Save configuration file to {os.path.abspath(__lowerCamelCase )}""" )
with open(__lowerCamelCase, "w", encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ =parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
) | 249 | 0 |
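# Hypothetical invocation of the conversion script above (all paths below are
# placeholders, not taken from this file):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sts-b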
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_lowerCAmelCase : Any = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
_lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 646 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : Union[str, Any] = """open-llama"""
def __init__( self , snake_case__=10_0000 , snake_case__=4096 , snake_case__=1_1008 , snake_case__=32 , snake_case__=32 , snake_case__="silu" , snake_case__=2048 , snake_case__=0.0_2 , snake_case__=1e-6 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=0.1 , snake_case__=0.1 , snake_case__=True , snake_case__=True , snake_case__=None , **snake_case__ , ):
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : Optional[int] = use_cache
lowerCAmelCase : Dict = kwargs.pop(
'use_memorry_efficient_attention' , snake_case__ )
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_dropout_prob
lowerCAmelCase : Union[str, Any] = use_stable_embedding
lowerCAmelCase : Tuple = shared_input_output_embedding
lowerCAmelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , )
def lowercase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f"got {self.rope_scaling}" )
lowerCAmelCase : List[Any] = self.rope_scaling.get('type' , snake_case__ )
lowerCAmelCase : List[str] = self.rope_scaling.get('factor' , snake_case__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(snake_case__ , snake_case__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 646 | 1 |
"""simple docstring"""
def _A ( _a : int = 1_0_0_0_0_0_0 ):
"""simple docstring"""
A = 1
A = 1
A = {1: 1}
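    # counters memoizes the Collatz chain length of every start value resolved so far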
for inputa in range(2 , _UpperCAmelCase ):
A = 0
A = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
A = (3 * number) + 1
counter += 1
if inputa not in counters:
A = counter
if counter > pre_counter:
A = inputa
A = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 617 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case__ ( unittest.TestCase ):
def A_ ( self : int , __a : Optional[int] , __a : Union[str, Any] ) -> str:
'''simple docstring'''
return f'''gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy'''
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : List[Any] , __a : Union[str, Any]=0 , __a : List[str]=(4, 4, 64, 64) , __a : Optional[Any]=False ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
__snake_case : List[Any] = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def A_ ( self : Any , __a : Any=False , __a : Dict="CompVis/stable-diffusion-v1-4" ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = jnp.bfloataa if fpaa else jnp.floataa
__snake_case : int = 'bf16' if fpaa else None
__snake_case , __snake_case : Union[str, Any] = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='unet' , dtype=__a , revision=__a )
return model, params
def A_ ( self : Any , __a : Dict=0 , __a : Dict=(4, 77, 768) , __a : List[str]=False ) -> List[Any]:
'''simple docstring'''
__snake_case : List[Any] = jnp.bfloataa if fpaa else jnp.floataa
__snake_case : Any = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def A_ ( self : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : Any ) -> Dict:
'''simple docstring'''
__snake_case , __snake_case : Tuple = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=__a )
__snake_case : Tuple = self.get_latents(__a , fpaa=__a )
__snake_case : int = self.get_encoder_hidden_states(__a , fpaa=__a )
__snake_case : List[str] = model.apply(
{'params': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__snake_case : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__snake_case : str = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def A_ ( self : str , __a : Optional[int] , __a : Union[str, Any] , __a : Tuple ) -> Optional[Any]:
'''simple docstring'''
__snake_case , __snake_case : int = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=__a )
__snake_case : int = self.get_latents(__a , shape=(4, 4, 96, 96) , fpaa=__a )
__snake_case : Optional[Any] = self.get_encoder_hidden_states(__a , shape=(4, 77, 1024) , fpaa=__a )
__snake_case : List[str] = model.apply(
{'params': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__snake_case : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__snake_case : int = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
| 286 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143 | 1 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def __a ( _UpperCamelCase: Any ) -> List[str]:
"""simple docstring"""
_snake_case = tmp_path / "file.csv"
_snake_case = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_UpperCamelCase , "w" ) as f:
f.write(_UpperCamelCase )
return str(_UpperCamelCase )
@pytest.fixture
def __a ( _UpperCamelCase: Dict ) -> Dict:
"""simple docstring"""
_snake_case = tmp_path / "malformed_file.csv"
_snake_case = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_UpperCamelCase , "w" ) as f:
f.write(_UpperCamelCase )
return str(_UpperCamelCase )
@pytest.fixture
def __a ( _UpperCamelCase: Any , _UpperCamelCase: Tuple ) -> Tuple:
"""simple docstring"""
_snake_case = tmp_path / "csv_with_image.csv"
_snake_case = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(_UpperCamelCase , "w" ) as f:
f.write(_UpperCamelCase )
return str(_UpperCamelCase )
@pytest.fixture
def __a ( _UpperCamelCase: Tuple ) -> Dict:
"""simple docstring"""
_snake_case = tmp_path / "csv_with_label.csv"
_snake_case = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_UpperCamelCase , "w" ) as f:
f.write(_UpperCamelCase )
return str(_UpperCamelCase )
@pytest.fixture
def __a ( _UpperCamelCase: str ) -> Tuple:
"""simple docstring"""
_snake_case = tmp_path / "csv_with_int_list.csv"
_snake_case = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_UpperCamelCase , "w" ) as f:
f.write(_UpperCamelCase )
return str(_UpperCamelCase )
def __a ( _UpperCamelCase: List[Any] , _UpperCamelCase: str , _UpperCamelCase: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_snake_case = Csv()
_snake_case = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_UpperCamelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_UpperCamelCase ) in record.message
for record in caplog.records )
@require_pil
def __a ( _UpperCamelCase: Optional[Any] ) -> Any:
"""simple docstring"""
with open(_UpperCamelCase , encoding="utf-8" ) as f:
_snake_case = f.read().splitlines()[1]
_snake_case = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_snake_case = csv._generate_tables([[csv_file_with_image]] )
_snake_case = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_snake_case = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def __a ( _UpperCamelCase: Dict ) -> Optional[Any]:
"""simple docstring"""
with open(_UpperCamelCase , encoding="utf-8" ) as f:
_snake_case = f.read().splitlines()[1:]
_snake_case = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_snake_case = csv._generate_tables([[csv_file_with_label]] )
_snake_case = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_snake_case = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def __a ( _UpperCamelCase: str ) -> Tuple:
"""simple docstring"""
_snake_case = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda _UpperCamelCase : [int(_UpperCamelCase ) for i in x.split()]} )
_snake_case = csv._generate_tables([[csv_file_with_int_list]] )
_snake_case = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_snake_case = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 185 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ : int = logging.get_logger(__name__)
UpperCamelCase_ : Tuple = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def __a ( _UpperCamelCase: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_snake_case = k.replace(_UpperCamelCase , _UpperCamelCase )
if k.startswith("encoder" ):
_snake_case = k.replace(".attn" , ".self_attn" )
_snake_case = k.replace("norm1" , "self_attn_layer_norm" )
_snake_case = k.replace("norm2" , "final_layer_norm" )
elif k.startswith("decoder" ):
_snake_case = k.replace("norm1" , "self_attn_layer_norm" )
_snake_case = k.replace("norm2" , "encoder_attn_layer_norm" )
_snake_case = k.replace("norm3" , "final_layer_norm" )
return k
def __a ( _UpperCamelCase: Dict ) -> Optional[int]:
"""simple docstring"""
_snake_case = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
_snake_case = sd.pop(_UpperCamelCase )
_snake_case = k.replace("layernorm_embedding" , "layer_norm" )
assert new_k not in sd
_snake_case = v
UpperCamelCase_ : Union[str, Any] = ['''START''']
@torch.no_grad()
def __a ( _UpperCamelCase: Any , _UpperCamelCase: Union[str, Any] , _UpperCamelCase: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_snake_case = torch.load(_UpperCamelCase , map_location="cpu" )
_snake_case = model["model"]
_snake_case = BlenderbotConfig.from_json_file(_UpperCamelCase )
_snake_case = BlenderbotForConditionalGeneration(_UpperCamelCase )
_snake_case = m.model.state_dict().keys()
_snake_case = []
_snake_case = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_snake_case = rename_state_dict_key(_UpperCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_snake_case = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_UpperCamelCase )
m.model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
m.half()
m.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
UpperCamelCase_ : Optional[int] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 185 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
'''simple docstring'''
def A_ ( snake_case , snake_case ):
SCREAMING_SNAKE_CASE:Optional[int] = word.split()
def justify(snake_case , snake_case , snake_case ) -> str:
SCREAMING_SNAKE_CASE:str = max_width - width
SCREAMING_SNAKE_CASE:Dict = len(snake_case )
if len(snake_case ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
SCREAMING_SNAKE_CASE:Optional[int] = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
SCREAMING_SNAKE_CASE:List[Any] = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
SCREAMING_SNAKE_CASE:List[Any] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(snake_case ):
num_spaces_between_words_list[i] += 1
SCREAMING_SNAKE_CASE:str = []
for i in range(snake_case ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(snake_case )
SCREAMING_SNAKE_CASE:Any = []
SCREAMING_SNAKE_CASE:list[str] = []
SCREAMING_SNAKE_CASE:List[str] = 0
for word in words:
if width + len(snake_case ) + len(snake_case ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(snake_case )
width += len(snake_case )
else:
# justify the line and add it to result
answer.append(justify(snake_case , snake_case , snake_case ) )
# reset new line and new width
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Tuple = [word], len(snake_case )
SCREAMING_SNAKE_CASE:Optional[int] = max_width - width - len(snake_case )
answer.append(" ".join(snake_case ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
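    # illustrative call of the justifier above (signature assumed from the body:
    # a sentence string plus a max width):
    #   A_("This is an example of text justification.", 16)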
| 465 | 0 |
"""simple docstring"""
def __lowercase ( snake_case_ : float ,snake_case_ : float ) ->float:
'''simple docstring'''
if mass < 0:
raise ValueError('''The mass of a body cannot be negative''' )
return 0.5 * mass * abs(snake_case_ ) * abs(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
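    # worked example (assuming the two arguments are mass in kg and velocity in m/s):
    #   0.5 * 10 * |5| * |5| = 125.0 J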
| 177 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
a_ = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
a_ = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
a_ = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=False ):
'''simple docstring'''
if rouge_types is None:
__A : Dict = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
__A : Dict = rouge_scorer.RougeScorer(rouge_types=__lowerCamelCase , use_stemmer=__lowerCamelCase )
if use_aggregator:
__A : Dict = scoring.BootstrapAggregator()
else:
__A : Optional[Any] = []
for ref, pred in zip(__lowerCamelCase , __lowerCamelCase ):
__A : List[Any] = scorer.score(__lowerCamelCase , __lowerCamelCase )
if use_aggregator:
aggregator.add_scores(__lowerCamelCase )
else:
scores.append(__lowerCamelCase )
if use_aggregator:
__A : Optional[Any] = aggregator.aggregate()
else:
__A : List[Any] = {}
for key in scores[0]:
__A : Tuple = [score[key] for score in scores]
return result
| 177 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'rwkv'
lowerCAmelCase_ = {'max_position_embeddings': 'context_length'}
def __init__( self : Optional[Any],__A : List[Any]=5_0_2_7_7,__A : Any=1_0_2_4,__A : Optional[int]=4_0_9_6,__A : int=3_2,__A : Optional[int]=None,__A : Optional[int]=None,__A : Tuple=1e-5,__A : Union[str, Any]=0,__A : Tuple=0,__A : Union[str, Any]=6,__A : int=False,__A : str=True,**__A : str,):
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[Any] = context_length
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size
_lowerCamelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size
_lowerCamelCase : int = layer_norm_epsilon
_lowerCamelCase : int = rescale_every
_lowerCamelCase : str = use_cache
_lowerCamelCase : Dict = bos_token_id
_lowerCamelCase : int = eos_token_id
super().__init__(
tie_word_embeddings=__A,bos_token_id=__A,eos_token_id=__A,**__A ) | 11 |
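# A hedged instantiation sketch: the class above mirrors the upstream RwkvConfig
# (the name is assumed here); the field values below are illustrative.
_config = RwkvConfig(vocab_size=50_277, context_length=1024, hidden_size=2048, num_hidden_layers=24)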
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
        _lowerCamelCase : Dict = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
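# A hedged usage sketch (the de-obfuscated names follow the upstream HF
# originals; the data dir and model id below are placeholders):
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)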
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 292 |
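# Hedged aside, not from the file above: a hypothetical minimal illustration of
# the lazy-import idea used there. Attribute access triggers the submodule
# import instead of paying the full cost when the package is first imported.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # imported only on first access, then resolved from the real submodule
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)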
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 335 | 0 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 154 |
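# Hedged aside, not part of the test above: the same dumps/loads round trip is a
# quick serialization smoke test for any object; the state_dict check is an
# illustrative assumption, not part of the original test.
import pickle

import torch

opt = torch.optim.SGD(torch.nn.Linear(4, 4).parameters(), lr=0.1)
clone = pickle.loads(pickle.dumps(opt))
assert clone.state_dict()["param_groups"][0]["lr"] == 0.1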
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> int:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other):
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False
    def search(self) -> Path | None:
        """Run greedy best-first search and return the path found, if any."""
        while self.open_nodes:
            # Open nodes are sorted using __lt__, i.e. by lowest f_cost first
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent):
        """Return the valid neighbours of ``parent`` on the grid."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    # goal is passed as (x, y) to match Node's signature
                    pos_x, pos_y, self.target.pos_x, self.target.pos_y, parent.g_cost + 1, parent,
                )
            )
        return successors
    def retrace_path(self, node):
        """Walk parent pointers back from ``node`` to the start."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
| 154 | 1 |
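# Hedged aside, not from the original file: greedy best-first orders the frontier
# by the heuristic h alone, while A* orders it by g + h. Same machinery, only the
# priority key differs. A tiny self-contained demonstration:
import heapq

frontier = []
g, h = 3, 4  # cost so far, heuristic estimate to the goal
heapq.heappush(frontier, (h, "greedy"))      # greedy best-first: key = h
heapq.heappush(frontier, (g + h, "a_star"))  # A*: key = g + h
print(heapq.heappop(frontier))  # (4, 'greedy') -- greedy ignores the path cost g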
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class SafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)
@torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected):
logger.warning(
'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)  # replace with a black image
        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected):
logger.warning(
'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)  # replace with a black image
return images, nsfw_detected, watermark_detected
| 399 |
'''simple docstring'''
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 399 | 1 |
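# Worked example (aside, assumes count_divisors above is in scope): the identity
# used there is d(n) = prod(e_i + 1) over the factorisation n = prod(p_i ** e_i).
# For 28 = 2**2 * 7**1 this gives (2 + 1) * (1 + 1) = 6, matching the divisors
# {1, 2, 4, 7, 14, 28}.
assert count_divisors(28) == 6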
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(_lowercase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[int] = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = True
if model_class in get_values(_lowercase ):
continue
__a : List[Any] = model_class(_lowercase )
model.to(_lowercase )
model.train()
__a : Optional[Any] = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
__a : str = model(**_lowercase ).loss
loss.backward()
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[str] = False
__a : Dict = True
if model_class in get_values(_lowercase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Optional[Any] = model_class(_lowercase )
model.to(_lowercase )
model.gradient_checkpointing_enable()
model.train()
__a : int = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
__a : Optional[int] = model(**_lowercase ).loss
loss.backward()
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
__a : str = model_class(config=_lowercase )
# Skip the check for the backbone
__a : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : List[str] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : List[str] = DPTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Dict = """add"""
with self.assertRaises(_lowercase ):
__a : Union[str, Any] = DPTForDepthEstimation(_lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 63 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # cancel the denoised sample and return a tensor of ones with its shape
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 63 | 1 |
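# Hedged usage sketch, not part of the file above: driving the one-step pipeline
# with tiny dummy components. The model sizes and block types are arbitrary
# assumptions chosen only so the example runs quickly on CPU.
import torch
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(
    sample_size=8, in_channels=3, out_channels=3,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
scheduler = DDPMScheduler(num_train_timesteps=1000)
pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
result = pipe()  # a tensor of ones with the sample's shape, by construction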
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCamelCase = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs, ):
'''simple docstring'''
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 363 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5, ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason="Model has no tokens embeddings" )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "d_model" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , UpperCamelCase__ )
UpperCAmelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCAmelCase_ = len(UpperCamelCase__ )
UpperCAmelCase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
UpperCAmelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCAmelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase__ ( A_="train-batch.pt" ):
UpperCAmelCase_ = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=A_ , repo_type="dataset" )
UpperCAmelCase_ = torch.load(A_ , map_location=A_ )
return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 660 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)
    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 716 |
import os
from collections.abc import Iterator
def _lowerCamelCase ( __A : str = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(__A ):
_UpperCAmelCase : List[Any] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__A )[1] in (".py", ".ipynb"):
yield os.path.join(__A , __A ).lstrip('''./''' )
def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def _lowerCamelCase ( __A : str = "." ) -> None:
_UpperCAmelCase : List[str] = ''''''
for filepath in sorted(good_file_paths(__A ) ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = os.path.split(__A )
if filepath != old_path:
_UpperCAmelCase : Optional[int] = print_path(__A , __A )
_UpperCAmelCase : Dict = (filepath.count(os.sep ) + 1) if filepath else 0
_UpperCAmelCase : Union[str, Any] = f'''{filepath}/{filename}'''.replace(''' ''' , '''%20''' )
_UpperCAmelCase : List[Any] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(f'''{md_prefix(__A )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('.')
| 186 | 0 |
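# Hedged illustration, not from the original script: the output shape produced by
# print_directory_md() for a tiny hypothetical tree (all paths are made up):
#
# ## Ciphers
#   * [Caesar Cipher](ciphers/caesar_cipher.py)
#
# ## Sorts
#   * [Bubble Sort](sorts/bubble_sort.py)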
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 532 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative ints as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 532 | 1 |
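# Worked example (aside, assumes binary_or above is in scope):
# 25 = 0b11001 and 32 = 0b100000, so 25 | 32 = 57 = 0b111001.
assert binary_or(25, 32) == "0b111001"
assert int(binary_or(25, 32), 2) == (25 | 32)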
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    """Image feature storing images as ``{"bytes": binary, "path": string}`` structs."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self ) -> Tuple:
return self.pa_type
    def encode_example(self, value) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.""" )
def __UpperCAmelCase ( self, A_, A_=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCAmelCase__ ={}
UpperCAmelCase__ =value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f"""An image should have one of \'path\' or \'bytes\' but both are None in {value}.""" )
else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
def __UpperCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dest_dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dest_dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 714 |
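# Hedged usage sketch, not part of the library file above: round-tripping a small
# uint8 array through the encode/decode pair. Assumes encode_np_array above is in
# scope; PNG compression is lossless for uint8, so equality holds.
from io import BytesIO

import numpy as np
import PIL.Image

array = (np.random.rand(8, 8, 3) * 255).astype("|u1")  # uint8 RGB image
encoded = encode_np_array(array)  # {"path": None, "bytes": b"..."}
decoded = PIL.Image.open(BytesIO(encoded["bytes"]))
assert np.array_equal(np.array(decoded), array)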
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load the original fairseq/metaseq state dict and remap it for HF OPT."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]
# pop unnecessary weights
    keys_to_delete = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
            sd.pop(key)
    keys_to_rename = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase__ =sd[key]
# We split QKV in separate Q,K,V
UpperCAmelCase__ =key.replace(".qkv_proj." , ".q_proj." )
UpperCAmelCase__ =key.replace(".qkv_proj." , ".k_proj." )
UpperCAmelCase__ =key.replace(".qkv_proj." , ".v_proj." )
UpperCAmelCase__ =value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =torch.split(A , depth // 3 , dim=0 )
UpperCAmelCase__ =q
UpperCAmelCase__ =k
UpperCAmelCase__ =v
del sd[key]
return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCamelCase_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 510 | 0 |
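# Hedged illustration, not from the original script: splitting a fused QKV
# projection of shape (3 * hidden, hidden) into three (hidden, hidden) blocks,
# the same torch.split pattern used in load_checkpoint above.
import torch

hidden = 4
fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)  # note the K, V, Q order
assert q.shape == k.shape == v.shape == (hidden, hidden)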
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 101 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5, initializer_range=0.02, **kwargs, ):
'''simple docstring'''
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 158 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        # the tester hardcodes its settings, shadowing the constructor arguments
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True)

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
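# A minimal NumPy sketch (my paraphrase, not the library implementation) of the
# rotation checked above: interleaved feature pairs (x1, x2) are rotated by
# position-dependent angles, assuming sin/cos tables that broadcast against x:
#
#   import numpy as np
#
#   def apply_rotary(x, sin, cos):
#       x1, x2 = x[..., 0::2], x[..., 1::2]
#       out = np.empty_like(x)
#       out[..., 0::2] = x1 * cos - x2 * sin
#       out[..., 1::2] = x2 * cos + x1 * sin
#       return out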
| 8 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify the input combination is valid
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of an overflow, map each `input_ids` sample back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
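# A minimal usage sketch (checkpoint id and file path are illustrative):
#
#   from PIL import Image
#   from transformers import LayoutXLMProcessor
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # OCR is applied by default
#   print(encoding.keys())  # input_ids, bbox, attention_mask, image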
| 8 | 1 |
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n, found by trial division up to sqrt(n)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime)
if __name__ == "__main__":
print(F"{solution() = }")
| 118 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , "neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , "num_attention_heads" ) )
class MobileViTModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = MobileViTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileViTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileViTForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_lowerCamelCase : str = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowerCamelCase : Optional[int] = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCamelCase : Tuple = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Any = False
_lowerCamelCase : List[Any] = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCamelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
__lowercase = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
__lowercase = outputs.hidden_states
__lowercase = 5
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowercase = 2
for i in range(len(_lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = MobileViTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def lowerCAmelCase__ ( ) ->List[Any]:
__lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(_lowerCamelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCamelCase , return_tensors="pt" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
# verify the logits
__lowercase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
__lowercase = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = model.to(_lowerCamelCase )
__lowercase = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCamelCase , return_tensors="pt" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
__lowercase = outputs.logits
# verify the logits
__lowercase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _lowerCamelCase )
__lowercase = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = model.to(_lowerCamelCase )
__lowercase = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCamelCase , return_tensors="pt" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
__lowercase = outputs.logits.detach().cpu()
__lowercase = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase , target_sizes=[(50, 60)] )
__lowercase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
__lowercase = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase )
__lowercase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
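# Shape arithmetic exercised by the tests above (a sketch, using the tester
# defaults rather than a real checkpoint): feature maps and segmentation logits
# have spatial size image_size // output_stride, so image_size=32 with
# output_stride=32 yields 1x1 maps, and each of the 5 hidden states halves the
# spatial resolution of the previous one.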
| 118 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
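# A minimal usage sketch for the pipeline defined below (checkpoint id taken
# from the published RePaint example; `original` and `mask` are user-supplied
# PIL images of matching size):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=original, mask_image=mask, num_inference_steps=250)
#   output.images[0].save("inpainted.png")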
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image, mask_image, num_inference_steps=250, eta=0.0, jump_length=10, jump_n_sample=10, generator=None, output_type="pil", return_dict=True) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 712 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
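# Behavior sketch: decimal_isolate(35.345, 1) -> 0.3 and
# decimal_isolate(-14.789, 3) -> -0.789; with digit_amount == 0 the raw
# (unrounded) fractional part is returned.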
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 300 | 0 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
super().__init__(
SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
A__ = field
A__ = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
A__ = Json(
cache_dir=SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , field=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def snake_case__ ( self ) -> Any:
# Build iterable dataset
if self.streaming:
A__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A__ = None
A__ = None
A__ = None
A__ = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , num_proc=self.num_proc , )
A__ = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
class JsonDatasetWriter:
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> str:
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
A__ = dataset
A__ = path_or_buf
A__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A__ = num_proc
A__ = "utf-8"
A__ = to_json_kwargs
def snake_case__ ( self ) -> int:
A__ = self.to_json_kwargs.pop("path_or_buf" , SCREAMING_SNAKE_CASE__ )
A__ = self.to_json_kwargs.pop("orient" , "records" )
A__ = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
A__ = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
A__ = self.to_json_kwargs.pop("compression" , SCREAMING_SNAKE_CASE__ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=SCREAMING_SNAKE_CASE__ ) as buffer:
A__ = self._write(file_obj=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
A__ = self._write(
file_obj=self.path_or_buf , orient=SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ , **self.to_json_kwargs )
return written
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
A__ , A__ , A__ , A__ , A__ = args
A__ = query_table(
table=self.dataset.data , key=slice(SCREAMING_SNAKE_CASE__ , offset + self.batch_size ) , indices=self.dataset._indices , )
A__ = batch.to_pandas().to_json(
path_or_buf=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) -> int:
A__ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
A__ = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(SCREAMING_SNAKE_CASE__ )
else:
A__ , A__ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(SCREAMING_SNAKE_CASE__ )
return written
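# A minimal usage sketch of the reader/writer above via the public `datasets`
# API (the file path is a placeholder):
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ds.to_json("out.jsonl", lines=True)   # backed by the JSON writer
#   ds2 = Dataset.from_json("out.jsonl")  # backed by the JSON reader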
| 104 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 104 | 1 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , **_lowerCAmelCase , ) -> Optional[int]:
super().__init__(features=_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = Sql(
cache_dir=_lowerCAmelCase , features=_lowerCAmelCase , sql=_lowerCAmelCase , con=_lowerCAmelCase , **_lowerCAmelCase , )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
self.builder.download_and_prepare(
download_config=_lowerCAmelCase , download_mode=_lowerCAmelCase , verification_mode=_lowerCAmelCase , base_path=_lowerCAmelCase , )
# Build dataset for splits
_lowerCAmelCase = self.builder.as_dataset(
split="train" , verification_mode=_lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
class SqlDatasetWriter:
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> Optional[Any]:
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
_lowerCAmelCase = dataset
_lowerCAmelCase = name
_lowerCAmelCase = con
_lowerCAmelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_lowerCAmelCase = num_proc
_lowerCAmelCase = to_sql_kwargs
def _snake_case ( self ) -> int:
_lowerCAmelCase = self.to_sql_kwargs.pop("sql" , _lowerCAmelCase )
_lowerCAmelCase = self.to_sql_kwargs.pop("con" , _lowerCAmelCase )
_lowerCAmelCase = self.to_sql_kwargs.pop("index" , _lowerCAmelCase )
_lowerCAmelCase = self._write(index=_lowerCAmelCase , **self.to_sql_kwargs )
return written
def _snake_case ( self , _lowerCAmelCase ) -> Any:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = args
_lowerCAmelCase = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
_lowerCAmelCase = query_table(
table=self.dataset.data , key=slice(_lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
_lowerCAmelCase = batch.to_pandas()
_lowerCAmelCase = df.to_sql(self.name , self.con , index=_lowerCAmelCase , **_lowerCAmelCase )
return num_rows or len(_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , **_lowerCAmelCase ) -> int:
_lowerCAmelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_lowerCAmelCase , _lowerCAmelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _lowerCAmelCase , _lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
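# A minimal usage sketch via the public `datasets` API (the sqlite URI is a
# placeholder):
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ds.to_sql("my_table", "sqlite:///my.db")               # backed by the SQL writer
#   ds2 = Dataset.from_sql("my_table", "sqlite:///my.db")  # backed by the SQL reader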
| 489 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
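# Worked example for shift_tokens_right above (values illustrative): with
# pad_token_id=0 and decoder_start_token_id=0,
#   [[5, 6, 7]]        -> [[0, 5, 6]]
#   [[5, -100, -100]]  -> [[0, 5, 0]]   # masked label positions become pad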
| 489 | 1 |
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
__UpperCAmelCase =[
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def test_custom_files_are_present(transformers_path: Path) -> bool:
    """Return True only if every custom extension file is present in the package."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 337 |
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU activation element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
| 551 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GPT-NeoX model."""

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
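# A minimal usage sketch of the validation above (values are illustrative):
#
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes
#   config = GPTNeoXConfig(rope_scaling={"type": "ntk", "factor": 2.0})     # raises ValueError
#
# `factor` must be a float strictly greater than 1.0.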
| 700 |
def sylvester(number: int) -> int:
    """Return the number at position `number` in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
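    # Quick sanity check of the recurrence a(n) = a(n-1)^2 - a(n-1) + 1:
    print([sylvester(n) for n in range(1, 5)])  # --> [2, 3, 7, 43]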
| 167 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                } ), reference_urls=["https://huggingface.co/docs/transformers/perplexity"], )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 375 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the positions a knight can move to from the current position on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem for a board of size n, trying every starting square."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
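    # Example: 5x5 is the smallest non-trivial square board that admits an open
    # knight's tour (3x3 and 4x4 have none); print one such tour row by row:
    for row in open_knight_tour(5):
        print(row)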
| 375 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="") | 307 |
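# Usage sketch for the script above, run from the repository root:
#
#   python ./utils/get_modified_files.py utils src tests examples
#
# prints the space-separated modified .py files since the fork point, which is
# what the Makefile targets consume.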
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1_024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs) -> None:
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk")
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB", )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(self, raw_speech, truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features | 307 | 1 |
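# A minimal usage sketch for the feature extractor above, assuming `audio` is a
# 1-D float numpy array sampled at 48 kHz (the variable name is illustrative):
#
#   extractor = ClapFeatureExtractor()
#   features = extractor(audio, sampling_rate=48_000, return_tensors="pt")
#   print(features["input_features"].shape)  # 4 stacked mel views per clip in "fusion" mode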
"""simple docstring"""
import requests
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Dict ) -> Any:
a_ : int = {'''Content-Type''': '''application/json'''}
a_ : Tuple = requests.post(__UpperCAmelCase , json={"text": message_body} , headers=__UpperCAmelCase )
if response.status_code != 200:
a_ : int = (
'''Request to slack returned an error '''
F'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(__UpperCAmelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
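    # The webhook expects a JSON payload of the form {"text": "..."}; richer
    # Slack layouts (blocks, attachments) would extend that same payload.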
| 473 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ["xlnet"]), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ["xlnet"]), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size)

    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)")
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--labels", default="", type=str, help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.", )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none", )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
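# A typical invocation sketch (the script name and data layout are illustrative;
# generic arguments such as --model_name_or_path come from lightning_base):
#
#   python run_ner.py --data_dir ./data --labels ./data/labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./out --do_train --do_predict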
| 501 | 0 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase : Dict = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase : Optional[int] = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase : Tuple = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized prediction matches the normalized reference, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Exact-match score in percent, counting a hit if any reference matches."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 1_00
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 1_00 * sari_score
def UpperCamelCase_ (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="exp" , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , ) -> int:
'''simple docstring'''
lowercase_ = len(references[0] )
if any(len(__lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowercase_ = [[refs[i] for refs in references] for i in range(__lowerCAmelCase )]
lowercase_ = sacrebleu.corpus_bleu(
__lowerCAmelCase , __lowerCAmelCase , smooth_method=__lowerCAmelCase , smooth_value=__lowerCAmelCase , force=__lowerCAmelCase , lowercase=__lowerCAmelCase , use_effective_order=__lowerCAmelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }), codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ], reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ], )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
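# Note: the SARI score above averages an F1-style score for KEEP and ADD with the
# precision of DELETE, computed over 1- to 4-grams, so the reported value (after
# the x100 in compute_sari) lies in [0, 100].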
| 700 |
"""simple docstring"""
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
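# The integration test above compares a 3x3 slice of the last hidden state against
# hard-coded reference values with atol=1e-4; running it downloads the
# microsoft/deberta-v2-xlarge checkpoint, hence the @slow marker.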
| 100 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)


def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
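

# Illustrative sketch (not from the original script): the slicing above assumes the
# fused in_proj matrix stacks the query, key and value projections along dim 0, in
# that order, each of hidden size 256. A standalone sanity check of that layout:
def _demo_qkv_split():
    import torch as _torch

    hidden = 256
    fused = _torch.randn(3 * hidden, hidden)  # stand-in for an in_proj_weight
    q, k, v = fused[:hidden], fused[hidden : 2 * hidden], fused[-hidden:]
    assert _torch.equal(_torch.cat([q, k, v]), fused)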


def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # insert ".model" right after the "conditional_detr" prefix
                state_dict[key.replace("conditional_detr", "conditional_detr.model", 1)] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__magic_name__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__magic_name__ : str = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 102 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments

logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Arguments for seq2seq training, extending the base TrainingArguments."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 102 | 1 |
import numpy as np


class IndexCalculation:
    """
    Computes common vegetation indices from red/green/blue/red-edge/NIR band
    matrices. All bands are expected to be numpy arrays of the same shape.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False
    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
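

# Illustrative usage (not part of the original file), on random band matrices:
#
#   rng = np.random.default_rng(0)
#   calc = IndexCalculation(red=rng.random((4, 4)), nir=rng.random((4, 4)))
#   print(calc.calculation("NDVI"))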
| 712 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures

logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the
    command line.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 572 | 0 |
from collections.abc import Callable
import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (explicit Euler), then trapezoidal corrector step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
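

# Minimal sanity check (illustrative, not from the original source): for
# dy/dx = y with y(0) = 1, Heun's method should approximate e^x, so the last
# entry should be close to e ≈ 2.71828 at x = 1.
def _example_exponential_growth() -> float:
    y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    return float(y[-1])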
if __name__ == "__main__":
import doctest
doctest.testmod()
| 568 | """simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """
    SentencePiece-based tokenizer for BARTpho, using a reduced monolingual
    vocabulary on top of the multilingual SentencePiece model.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
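

# Illustrative usage (not from the original file): given the vocab files above,
# the tokenizer is normally instantiated through the auto classes, e.g.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]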
| 434 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
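

# Illustrative, minimal non-test usage sketch (assumption: `model_dir` points at
# an ONNX export of a Stable Diffusion checkpoint):
#
#   pipe = OnnxStableDiffusionPipeline.from_pretrained(model_dir, provider="CPUExecutionProvider")
#   image = pipe("a photo of an astronaut", num_inference_steps=10).images[0]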
| 54 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    """
    Monte Carlo estimate of pi: dots are placed uniformly in a 2x2 square with
    an inscribed unit circle; the in-circle proportion approaches pi/4.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 | 1 |
"""simple docstring"""
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all combinations of k numbers out of 1 ... n using backtracking."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
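

# Sanity-check sketch (not in the original file): the backtracking output should
# match Python's standard library, up to list-vs-tuple:
#
#   from itertools import combinations
#   assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]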
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 575 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights into our MobileNetV1 structure.
    """
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE_ : int = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 375 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
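

# Illustrative usage (mirrors the example in _KWARGS_DESCRIPTION above):
#
#   import datasets
#   squad_metric = datasets.load_metric("squad")
#   predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
#   references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
#   print(squad_metric.compute(predictions=predictions, references=references))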
| 134 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 134 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None):
    """Converts a pickled state dict to fp16, overwriting src_path unless save_path is given."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
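# --- Illustrative usage sketch (editor addition). ---
# With `fire`, the arguments of `convert` become CLI flags, so a checkpoint can be
# halved in place or written elsewhere (the script file name below is assumed for
# illustration only):
#
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# fp16 halves storage, but values outside float16's range lose precision.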
| 519 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup schedule that hands off to `decay_schedule_fn` afterwards."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step: int):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a warmup phase followed by a polynomial decay schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied to selected variables."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator(object):
    """Accumulates gradients over several steps; read `.gradients`, then `.reset()`."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
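# --- Illustrative usage sketch (editor addition), assuming the classes above. ---
# Builds an AdamWeightDecay optimizer whose LR warms up for 100 steps before decaying,
# then accumulates two fake gradient steps for one scalar variable and applies their mean.
if __name__ == "__main__":
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5,
        num_train_steps=1000,
        num_warmup_steps=100,
        weight_decay_rate=0.01,
    )
    accumulator = GradientAccumulator()
    var = tf.Variable(1.0)
    accumulator([tf.constant(0.5)])  # gradient of virtual micro-batch 1
    accumulator([tf.constant(0.5)])  # gradient of virtual micro-batch 2
    grads = [g / tf.cast(accumulator.step, tf.float32) for g in accumulator.gradients]
    optimizer.apply_gradients(zip(grads, [var]))
    accumulator.reset()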
| 519 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        """
        Arguments:
            id_ - an id to identify the vertex
        Attributes:
            neighbors - a list of the vertices it is linked to
            edges     - a dict to store the edges's weight
        """
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum key: O(mn)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a min-heap: O(m log n)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
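# --- Illustrative usage sketch (editor addition). ---
# Edges: (1,2,w=1), (1,3,w=3), (2,3,w=1); the MST keeps the two weight-1 edges.
# Each returned pair is a (child, parent) tree edge with 1-based vertex ids.
if __name__ == "__main__":
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)
    connect(graph, 1, 3, 3)
    connect(graph, 2, 3, 1)
    print(prim(graph, graph[0]))             # expected: [(2, 1), (3, 2)]
    print(list(prim_heap(graph, graph[0])))  # same tree from the heap variant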
| 23 |
def factorial(num: int) -> int:
    """Find the factorial of the given number."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of a number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in factorial(num)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
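# --- Worked example (editor addition). ---
# For num = 10: factorial(10) = 3628800 and split_and_add(3628800) returns
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27. With the default
# num = 100 this computes the classic "factorial digit sum" of 100!.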
| 23 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np",
                return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True,
            return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True,
            return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True,
            return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        # compare against the 24 reference values that survived in this copy
        self.assertTrue(np.allclose(input_features[0, 0, :24], expected, atol=1e-4))
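# --- Illustrative sketch (editor addition; the function name is hypothetical). ---
# The `_check_zero_mean_unit_variance` assertions above verify per-utterance cepstral
# mean and variance normalization (CMVN). The normalization itself amounts to the
# following, applied along the time axis of each feature matrix:
def _utterance_cmvn_sketch(features: np.ndarray) -> np.ndarray:
    # features: (num_frames, feature_size); normalize each feature dimension
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return (features - mean) / (std + 1e-9)  # epsilon guards all-constant inputs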
| 380 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = 'deformable_detr'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function='relu',
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type='sine',
        backbone='resnet50',
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
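# --- Illustrative usage sketch (editor addition). ---
# Instantiating the config and round-tripping it through `to_dict`; a dict-valued
# `backbone_config`, when present, is serialized alongside the top-level fields.
if __name__ == "__main__":
    config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
    config_dict = config.to_dict()
    assert config_dict["model_type"] == "deformable_detr"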
| 380 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float):
    """Compute the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
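# --- Worked example (editor addition). ---
# For L = 10 mH and C = 100 nF: f = 1 / (2 * pi * sqrt(L * C)).
#   sqrt(1e-2 * 1e-7) = sqrt(1e-9) ≈ 3.162e-5, so f ≈ 1 / (2 * pi * 3.162e-5) ≈ 5033 Hz.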
| 654 | '''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern]
__UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ )
__UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
__UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start of the list.
__UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
__UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__UpperCAmelCase = default_version.base_version
elif patch:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = get_version()
__UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
__UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
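# --- Illustrative sketch (editor addition). ---
# How the "init" pattern rewrites a version line; the same substitution drives every
# entry in REPLACE_PATTERNS:
#
#     re_pattern, replace = REPLACE_PATTERNS["init"]
#     code = '__version__ = "4.29.0.dev0"\n'
#     print(re_pattern.sub(replace.replace("VERSION", "4.29.0"), code))
#     # -> __version__ = "4.29.0"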
| 654 | 1 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest_string = "[...]"  # value elided in the source; kept as-is
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest_string = "[...]"  # value elided in the source; kept as-is
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint which are supposed to download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
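# --- Illustrative sketch (editor addition; metric and target names are hypothetical). ---
# New patchers plug in the same way: decorate a generator function; the classmethod
# wraps it in `contextmanager` and stores it under the metric's name, so
# `patch_intensive_calls` can activate it while that metric's doctest runs.
#
#     @LocalMetricTest.register_intensive_calls_patcher("my_metric")
#     def patch_my_metric(module_name):
#         with patch("my_metric.expensive_forward_pass") as mock_forward:
#             mock_forward.return_value = 0.5
#             yield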
| 218 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency or inductance from two given electrical
    properties; pass 0 for exactly the one quantity to solve for.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
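# --- Worked example (editor addition). ---
# X_L = 2 * pi * f * L. For L = 35 mH at f = 50 Hz:
#   ind_reactance(35e-3, 50, 0) -> {'reactance': 2 * pi * 50 * 0.035} ≈ {'reactance': 10.996}
# Passing 0 for any single argument asks the function to solve for that quantity.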
| 218 | 1 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """
    Returns the first perfect-partition candidate at which the running proportion
    of perfect partitions falls below max_proportion.
    """
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'{solution() = }')
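# --- Sanity check (editor addition). ---
# check_partition_perfect(n) tests whether (sqrt(4n + 1) + 1) / 2 is an exact power
# of two, via log2:
#   n = 2: sqrt(9) = 3, (3 + 1) / 2 = 2, log2(2) = 1 (an integer) -> True.
#   n = 3: sqrt(13) ≈ 3.606, (3.606 + 1) / 2 ≈ 2.303, log2 ≈ 1.20 -> False.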
| 701 | def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: outputs 1 when both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 452 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
def _A ( self : List[str] ):
self.config_tester.run_common_tests()
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase_ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def _A ( self : List[str] ):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def _A ( self : Optional[int] ):
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def _A ( self : List[str] ):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def _A ( self : Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _A ( self : List[Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self : Tuple ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_output_hidden_state(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        # we set requires_grad=True on inputs_embeds in the modeling code; the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
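# Added illustration (not part of the original test file): `retain_grad` above is needed
# because PyTorch frees the gradients of non-leaf tensors after backward by default.
# A minimal standalone sketch of the same mechanism:
#
#     x = torch.ones(2, requires_grad=True)
#     y = x * 2          # non-leaf tensor
#     y.retain_grad()
#     y.sum().backward()
#     assert y.grad is not None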
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 62 |
def odd_even_sort(input_list: list) -> list:
    """Sort a list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until a full pass makes no swap, keep looping
        is_sorted = True

        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
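
# Added note (not part of the original file): odd-even (brick) sort is O(n^2) like
# bubble sort, but each pass only compares disjoint pairs, so the even-index pass and
# the odd-index pass can each be fully parallelised. Quick sanity check:
#
#     >>> odd_even_sort([5, 3, 8, 1, 9, 2])
#     [1, 2, 3, 5, 8, 9]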
| 99 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
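# Added note (assumption about typical usage, not from the original file): with the
# `_LazyModule` pattern above, importing the package is cheap; the heavy torch/TF
# submodules are only loaded when one of their attributes is first accessed, e.g.
#
#     from transformers.models.tapas import TapasConfig  # loads configuration_tapas only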
| 441 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
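    # Added note (not from the original script): the check above works because
    # Trainer.evaluate/Trainer.predict gather predictions from all ranks and trim the
    # padding added by the distributed sampler, so the driver sees the full, ordered
    # sequence [0, 1, ..., len(dataset) - 1] regardless of world size.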
| 441 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
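    # Added illustration (not from the original test): the "additional L_2 normalization"
    # used above makes each embedding unit-norm along the last axis. Standalone sketch:
    #
    #     emb = torch.randn(3, 32)
    #     emb = F.normalize(emb, dim=-1)
    #     assert torch.allclose(emb.norm(dim=-1), torch.ones(3))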
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 560 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
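

# Added illustration (hypothetical helper, not part of the original script): the
# conversion above is, at its core, moving tensors between dicts under renamed keys.
# A minimal standalone sketch of that idea using plain dicts:
def _demo_rename_prefix(state_dict, old_prefix, new_prefix):
    """Return a copy of ``state_dict`` with keys under ``old_prefix`` moved to ``new_prefix``."""
    return {
        (new_prefix + key[len(old_prefix):] if key.startswith(old_prefix) else key): value
        for key, value in state_dict.items()
    }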
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 560 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
UpperCAmelCase__ = pd.read_csv("sample_data.csv", header=None)
UpperCAmelCase__ = df.shape[:1][0]
# If you're using some other dataset input the target column
UpperCAmelCase__ = df.iloc[:, 1:2]
UpperCAmelCase__ = actual_data.values.reshape(len_data, 1)
UpperCAmelCase__ = MinMaxScaler().fit_transform(actual_data)
UpperCAmelCase__ = 10
UpperCAmelCase__ = 5
UpperCAmelCase__ = 20
UpperCAmelCase__ = len_data - periods * look_back
UpperCAmelCase__ = actual_data[:division]
UpperCAmelCase__ = actual_data[division - look_back :]
UpperCAmelCase__ , UpperCAmelCase__ = [], []
UpperCAmelCase__ , UpperCAmelCase__ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
UpperCAmelCase__ = np.array(train_x)
UpperCAmelCase__ = np.array(test_x)
UpperCAmelCase__ = np.array([list(i.ravel()) for i in train_y])
UpperCAmelCase__ = np.array([list(i.ravel()) for i in test_y])
UpperCAmelCase__ = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
UpperCAmelCase__ = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
UpperCAmelCase__ = model.predict(x_test)
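    # Added note (not from the original script): after the windowing above,
    # x_train has shape (num_windows, look_back, 1) and y_train has shape
    # (num_windows, forward_days), i.e. each sample maps `look_back` past prices to
    # the next `forward_days` prices, which matches the Dense(forward_days) output head.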
| 713 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_cycle_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_cycle_diffusion_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
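# Added usage note (assumption about a typical call, not from the original tests):
# CycleDiffusion edits `image` toward `prompt` while staying close to `source_prompt`,
# which is why both prompts and both guidance scales appear above, e.g.
#
#     out = pipe(prompt="A blue colored car", source_prompt="A black colored car",
#                image=init_image, strength=0.85, guidance_scale=3, source_guidance_scale=1)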
| 362 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
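

# Added example (assumption about the expected label-file layout, not from the original
# script): a file whose lines each start with one label name, e.g.
#
#     down
#     up
#     left
#
# is read into {0: "down", 1: "up", 2: "left"}, i.e. line number -> first word on the line.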
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
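# Sketch of the key convention handled above (the example key is illustrative):
#   "conv_layers.0.2.weight" -> layer_id = 0, type_id = 2 (a norm parameter; 0 = conv)
#   items = "conv_layers.0.2.weight".split("conv_layers.")[-1].split(".")
#   int(items[0]) == 0 and int(items[1]) == 2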
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path)
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=16_000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
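# Example invocation (script name and paths are placeholders):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned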
| 447 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def set_architecture_configs(model_name, config):
    # encoder and decoder sizes per model variant
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
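# Quick sanity check of the renaming above (the input key is illustrative):
#   rename_key("blocks.0.attn.proj.weight")
#   -> "videomae.encoder.layer.0.attention.output.dense.weight"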
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")
        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
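# The fused qkv tensor stacks query/key/value along dim 0 with shape (3 * dim, dim),
# so the slices [:dim], [dim : dim * 2] and [-dim:] above recover them in that order.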
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)
    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
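# Example invocation (script name is a placeholder; the default --checkpoint_url
# above points at the base checkpoint on Google Drive):
#   python convert_videomae_to_pytorch.py \
#       --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base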
| 693 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase_(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
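# Minimal usage sketch (the checkpoint id is a placeholder):
#   processor = UpperCamelCase_.from_pretrained("org/vision-text-checkpoint")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # batch contains input_ids, attention_mask and pixel_values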
| 705 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class UpperCamelCase_:
    def get_dummy_components(self):
torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def get_superresolution_dummy_components(self):
torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 378 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __lowercase(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
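# Minimal usage sketch: the zero-argument constructor reproduces the defaults above.
#   config = __lowercase()
#   assert config.model_type == "biogpt" and config.num_hidden_layers == 24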
| 422 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv_a = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv_a)
            conv_b = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv_b)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
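# Rough initialization sketch (input shapes follow init_weights above):
#   model = FlaxControlNetModel()
#   params = model.init_weights(jax.random.PRNGKey(0))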
| 583 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a__(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
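# Hypothetical subclass sketch showing the intended contract (names are illustrative):
#   class EnvCommand(a__):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           env_parser = parser.add_parser("env")
#           env_parser.set_defaults(func=lambda args: EnvCommand())
#       def run(self):
#           print("collect environment info here")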
| 478 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__lowerCamelCase = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        training_logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        training_logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=training_logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
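# Example launch (paths are placeholders; --data_dir and --output_dir come from the
# generic args added above, --task and --do_predict are defined in this file):
#   python finetune.py --data_dir ./cnn_dm --output_dir ./out --task summarization --do_predict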
| 478 | 1 |
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
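
# Illustrative sanity checks of the exponent arithmetic (uses the function as restored above):
if __name__ == "__main__":
    # 4 km -> m: exponent = 3 - 0 = 3, so 4 * 10**3
    print(length_conversion(4, "kilometer", "meter"))  # 4000.0
    # 1 m -> km: exponent = -(3 - 0) = -3, so 1 * 10**-3
    print(length_conversion(1, "meter", "kilometer"))  # 0.001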
| 220 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
# Converting Bytes to Megabytes
def b2mb(x):
    return int(x / 2**20)
# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
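
# --- Illustrative usage of TorchTracemalloc outside the training loop (assumes a CUDA device is available):
# with TorchTracemalloc() as tracemalloc:
#     x = torch.randn(1024, 1024, device="cuda")
#     y = x @ x
# print(f"delta used/peak (MB): {tracemalloc.used}/{tracemalloc.peaked}")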
| 548 | 0 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
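
# Illustrative, non-interactive sanity check (edge list values chosen for this sketch):
def _demo_bellman_ford() -> None:
    edges = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 1},
        {"src": 2, "dst": 1, "weight": 2},
    ]
    # 0 -> 2 -> 1 is cheaper than the direct edge 0 -> 1, so the distances are [0.0, 3.0, 1.0]
    print(bellman_ford(edges, vertex_count=3, edge_count=3, src=0))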
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 521 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    """Configuration class for a generic vision encoder-decoder model."""

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "VisionEncoderDecoderConfig":
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
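
# Illustrative composition of the config class above (the encoder/decoder model types are examples):
if __name__ == "__main__":
    encoder_cfg = AutoConfig.for_model("vit")
    decoder_cfg = AutoConfig.for_model("gpt2")
    cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
    assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention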
| 521 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
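
# Illustrative: to run only the slow integration test above from a transformers checkout
# (path assumes the repo layout; requires TensorFlow and network access for the weights):
#   RUN_SLOW=1 python -m pytest tests/models/dpr/test_modeling_tf_dpr.py -k test_inference_no_head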
| 403 |


class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
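
# Illustrative usage (grid values chosen for this sketch; connectivity is 8-directional):
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(Graph(3, 4, grid).count_islands())  # 2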
| 403 | 1 |
"""simple docstring"""
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending sorted collection by interpolation search."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation search on `sorted_collection[left:right + 1]`."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
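
# Illustrative, deterministic checks of both variants (values chosen for this sketch):
def _demo_interpolation_search() -> None:
    data = [10, 30, 40, 45, 50, 66, 77, 93]
    assert interpolation_search(data, 66) == 5
    assert interpolation_search_by_recursion(data, 66, 0, len(data) - 1) == 5
    assert interpolation_search(data, 11) is None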
if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]

    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 715 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 22 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
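
# Illustrative usage (requires downloading the checkpoint; the image path is a placeholder):
# from PIL import Image
# tool = DocumentQuestionAnsweringTool()
# answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")
# print(answer)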
| 191 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
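
# Illustrative: `attribute_map` above aliases the generic HF names onto the GPT-style ones.
if __name__ == "__main__":
    cfg = CodeGenConfig(n_layer=2, n_head=4, n_embd=256)  # tiny config for experimentation
    assert cfg.num_hidden_layers == 2 and cfg.hidden_size == 256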
| 191 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
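
# Illustrative invocation (the script file name and paths are placeholders for this sketch):
#   python convert_mlcvnets_to_pytorch.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small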
| 27 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Depth-first search on a graph given as an adjacency mapping."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
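
# Note on the lazy-module pattern above (informational): importing this package is cheap because
# `_LazyModule` resolves names from `_import_structure` on demand; heavy submodules such as
# modeling_reformer are only imported when an attribute like `ReformerModel` is first accessed.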
| 428 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down memoized edit (Levenshtein) distance between two words."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
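
# Illustrative check (inputs chosen for this sketch): "intention" -> "execution" needs 5 edits.
if __name__ == "__main__":
    assert min_distance_up_bottom("intention", "execution") == 5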
| 428 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( _lowerCamelCase ):
a = (PNDMScheduler,)
a = (("num_inference_steps", 50),)
def _lowerCamelCase ( self , **a__ ):
A_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**UpperCamelCase_ )
return config
def _lowerCamelCase ( self , a__=0 , **a__ ):
A_ : Any = dict(self.forward_default_kwargs )
A_ : Dict = kwargs.pop("""num_inference_steps""" , UpperCamelCase_ )
A_ : str = self.dummy_sample
A_ : Optional[Any] = 0.1 * sample
A_ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ : List[Any] = self.get_scheduler_config(**UpperCamelCase_ )
A_ : Tuple = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals
A_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
A_ : int = scheduler_class.from_pretrained(UpperCamelCase_ )
new_scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals
A_ : Any = dummy_past_residuals[:]
A_ : Any = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
A_ : Dict = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A_ : List[str] = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
A_ : int = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self , a__=0 , **a__ ):
A_ : str = dict(self.forward_default_kwargs )
A_ : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCamelCase_ )
A_ : int = self.dummy_sample
A_ : int = 0.1 * sample
A_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ : Optional[int] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
A_ : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
A_ : List[Any] = scheduler_class.from_pretrained(UpperCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
A_ : Optional[int] = dummy_past_residuals[:]
A_ : str = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
A_ : str = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A_ : Any = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
A_ : List[str] = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowerCamelCase ( self , **a__ ):
A_ : Dict = self.scheduler_classes[0]
A_ : Optional[int] = self.get_scheduler_config(**UpperCamelCase_ )
A_ : Dict = scheduler_class(**UpperCamelCase_ )
A_ : Optional[Any] = 10
A_ : Optional[Any] = self.dummy_model()
A_ : Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
A_ : str = model(UpperCamelCase_ , UpperCamelCase_ )
A_ : Union[str, Any] = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A_ : Any = model(UpperCamelCase_ , UpperCamelCase_ )
A_ : List[Any] = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
return sample
def _lowerCamelCase ( self ):
A_ : List[Any] = dict(self.forward_default_kwargs )
A_ : Optional[int] = kwargs.pop("""num_inference_steps""" , UpperCamelCase_ )
for scheduler_class in self.scheduler_classes:
A_ : str = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**UpperCamelCase_ )
A_ : Any = self.dummy_sample
A_ : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase_ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase_ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase_ , """set_timesteps""" ):
A_ : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ : Optional[Any] = dummy_past_residuals[:]
A_ : Dict = scheduler.step_prk(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
A_ : int = scheduler.step_prk(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ : Optional[int] = scheduler.step_plms(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
A_ : Union[str, Any] = scheduler.step_plms(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowerCamelCase ( self ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def _lowerCamelCase ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase_ )
A_ : str = self.scheduler_classes[0]
A_ : str = self.get_scheduler_config(steps_offset=1 )
A_ : List[str] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
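        # Note: the duplicated entries (851, 851, 801, 801, ...) are expected; the PRK
        # (Runge-Kutta) warm-up revisits each early timestep before the PLMS phase begins.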
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def _lowerCamelCase ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=UpperCamelCase_ )
def _lowerCamelCase ( self ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase_ )
def _lowerCamelCase ( self ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
A_ : Dict = 27
for scheduler_class in self.scheduler_classes:
A_ : Optional[Any] = self.dummy_sample
A_ : int = 0.1 * sample
A_ : Dict = self.get_scheduler_config()
A_ : str = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A_ : int = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
def _lowerCamelCase ( self ):
with self.assertRaises(UpperCamelCase_ ):
A_ : Tuple = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**UpperCamelCase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _lowerCamelCase ( self ):
A_ : Any = self.full_loop()
A_ : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
A_ : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def _lowerCamelCase ( self ):
A_ : List[Any] = self.full_loop(prediction_type="""v_prediction""" )
A_ : Any = torch.sum(torch.abs(UpperCamelCase_ ) )
A_ : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def _lowerCamelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
A_ : str = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 )
A_ : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
A_ : List[str] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def _lowerCamelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
A_ : Dict = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 )
A_ : Optional[int] = torch.sum(torch.abs(UpperCamelCase_ ) )
A_ : Tuple = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
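    # For orientation, a minimal sketch of the two-phase sampling that full_loop above
    # drives (model/sample/step count are placeholders, not taken from this file):
    #
    #     scheduler.set_timesteps(10)
    #     for t in scheduler.prk_timesteps:   # Runge-Kutta warm-up
    #         sample = scheduler.step_prk(model(sample, t), t, sample).prev_sample
    #     for t in scheduler.plms_timesteps:  # pseudo linear multistep phase
    #         sample = scheduler.step_plms(model(sample, t), t, sample).prev_sample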
| 710 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
_lowerCAmelCase = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer ,key ,value ,full_name ,weight_type ,is_finetuned ):
'''simple docstring'''
for attribute in key.split(""".""" ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
A_ : Tuple = """lm_head"""
A_ : int = getattr(_lowerCAmelCase ,_lowerCAmelCase )
if weight_type is not None:
A_ : List[str] = getattr(_lowerCAmelCase ,_lowerCAmelCase ).shape
else:
A_ : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
A_ : Union[str, Any] = value
elif weight_type == "weight_g":
A_ : Any = value
elif weight_type == "weight_v":
A_ : Tuple = value
elif weight_type == "bias":
A_ : List[Any] = value
else:
A_ : int = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
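# The loop above is the usual "descend with getattr, write at the leaf" pattern. A
# stripped-down illustration (hypothetical helper, not used by this script):
def _set_by_path(root, dotted_key, value):
    *parents, leaf = dotted_key.split(".")
    obj = root
    for name in parents:  # walk down to the parent module
        obj = getattr(obj, name)
    getattr(obj, leaf).data = value  # overwrite the leaf tensor in place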
def recursively_load_weights( fairseq_model ,hf_model ,is_finetuned ):
'''simple docstring'''
A_ : Dict = []
A_ : List[Any] = fairseq_model.state_dict()
A_ : Any = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
A_ : Any = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,hf_model.config.feat_extract_norm == """group""" ,)
A_ : int = True
else:
for key, mapped_key in MAPPING.items():
A_ : Any = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
A_ : Dict = True
if "*" in mapped_key:
A_ : Any = name.split(_lowerCAmelCase )[0].split(""".""" )[-2]
A_ : Optional[int] = mapped_key.replace("""*""" ,_lowerCAmelCase )
if "weight_g" in name:
A_ : Tuple = """weight_g"""
elif "weight_v" in name:
A_ : Optional[int] = """weight_v"""
elif "bias" in name:
A_ : List[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ : Optional[int] = """weight"""
else:
A_ : Dict = None
set_recursively(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name ,value ,feature_extractor ,unused_weights ,use_group_norm ):
'''simple docstring'''
A_ : Union[str, Any] = full_name.split("""conv_layers.""" )[-1]
A_ : Union[str, Any] = name.split(""".""" )
A_ : List[str] = int(items[0] )
A_ : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
A_ : List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
A_ : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
A_ : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
A_ : Tuple = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
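# For reference, the fairseq names parsed above look like (illustrative examples):
#   "conv_layers.0.0.weight" -> type_id 0: conv weight/bias of extractor layer 0
#   "conv_layers.2.2.weight" -> type_id 2: layer-norm weight/bias of extractor layer 2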
@torch.no_grad()
def convert_unispeech_checkpoint( checkpoint_path ,pytorch_dump_folder_path ,config_path=None ,dict_path=None ,is_finetuned=True ):
'''simple docstring'''
if config_path is not None:
A_ : Optional[int] = UniSpeechConfig.from_pretrained(_lowerCAmelCase )
else:
A_ : List[str] = UniSpeechConfig()
if is_finetuned:
if dict_path:
A_ : str = Dictionary.load_from_json(_lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A_ : List[Any] = target_dict.pad_index
A_ : str = target_dict.bos_index
A_ : Optional[Any] = target_dict.eos_index
A_ : Optional[int] = len(target_dict.symbols )
A_ : str = os.path.join(_lowerCAmelCase ,"""vocab.json""" )
if not os.path.isdir(_lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCAmelCase ) )
return
os.makedirs(_lowerCAmelCase ,exist_ok=_lowerCAmelCase )
A_ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
A_ : Dict = 4_2
A_ : Optional[Any] = 4_3
with open(_lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(_lowerCAmelCase ,_lowerCAmelCase )
A_ : Tuple = WavaVecaPhonemeCTCTokenizer(
_lowerCAmelCase ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=_lowerCAmelCase ,)
A_ : Any = True if config.feat_extract_norm == """layer""" else False
A_ : List[str] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6_0_0_0 ,padding_value=0 ,do_normalize=_lowerCAmelCase ,return_attention_mask=_lowerCAmelCase ,)
A_ : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCAmelCase ,tokenizer=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
A_ : int = UniSpeechForCTC(_lowerCAmelCase )
else:
A_ : str = UniSpeechForPreTraining(_lowerCAmelCase )
if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A_ : List[str] = model[0].eval()
recursively_load_weights(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
hf_unispeech.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_lowerCAmelCase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
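    # Example invocation (script filename is hypothetical; the flags are the ones defined above):
    #   python convert_unispeech_checkpoint.py --checkpoint_path /path/to/unispeech.pt \
    #       --pytorch_dump_folder_path ./unispeech-hf --dict_path /path/to/dict.ltr.txt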
| 481 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ['bert-base-uncased', 'bert-base-cased']
TINY_MODEL_CHECKPOINT = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
    class ModelToSave(tf.keras.Model ):
"""simple docstring"""
        def __init__( self , tokenizer ):
            """simple docstring"""
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.bert = TFAutoModel.from_config(config )
        def call( self , inputs ):
            """simple docstring"""
            tokenized = self.tokenizer(inputs )
            out = self.bert(**tokenized )
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowercase (unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
BertTokenizer.from_pretrained(lowerCAmelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
SCREAMING_SNAKE_CASE_ : str = [TFBertTokenizer.from_pretrained(lowerCAmelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(lowerCAmelCase__ , use_fast_bert_tokenizer=lowerCAmelCase__ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
SCREAMING_SNAKE_CASE_ : List[str] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
SCREAMING_SNAKE_CASE_ : int = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowerCAmelCase__ , return_tensors='tf' , padding='longest' )
SCREAMING_SNAKE_CASE_ : List[str] = tf_tokenizer(lowerCAmelCase__ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE_ : str = tf_tokenizer(self.paired_sentences )
SCREAMING_SNAKE_CASE_ : Tuple = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE_ : Any = tf.function(lowerCAmelCase__ )
for test_inputs in (self.test_sentences, self.paired_sentences):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = compiled_tokenizer(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = tf_tokenizer(lowerCAmelCase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE_ : Tuple = ModelToSave(tokenizer=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tf.convert_to_tensor(self.test_sentences )
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowerCAmelCase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE_ : Dict = Path(lowerCAmelCase__ ) / 'saved.model'
model.save(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tf.keras.models.load_model(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = loaded_model(lowerCAmelCase__ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
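    # The round-trip above is the point of TFBertTokenizer: tokenization runs in-graph,
    # so tokenizer and model export together as a single SavedModel. Minimal sketch
    # (save path illustrative):
    #
    #     combined = ModelToSave(tokenizer=TFBertTokenizer.from_pretrained(TINY_MODEL_CHECKPOINT))
    #     combined(tf.convert_to_tensor(["An example sentence."]))  # build the model once
    #     combined.save("combined.model")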
| 101 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__SCREAMING_SNAKE_CASE : str = {
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
__SCREAMING_SNAKE_CASE : str = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[Any] = VOCAB_FILES_NAMES
lowercase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : str = PRETRAINED_INIT_CONFIGURATION
lowercase__ : int = ['input_ids', 'attention_mask']
lowercase__ : Tuple = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
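    # For a pair (A, B), the two methods above produce the standard BERT-style layout:
    #   input_ids:      [CLS] A ... [SEP] B ... [SEP]
    #   token_type_ids:   0   0 ...   0   1 ...   1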
| 661 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
snake_case__ : Tuple = MODEL_FOR_MASKED_LM_MAPPING
snake_case__ : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Dict = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
__SCREAMING_SNAKE_CASE : List[str] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
__SCREAMING_SNAKE_CASE : str = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
__SCREAMING_SNAKE_CASE : List[str] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
__SCREAMING_SNAKE_CASE : str = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
__SCREAMING_SNAKE_CASE : Tuple = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
__SCREAMING_SNAKE_CASE : int = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
__SCREAMING_SNAKE_CASE : Tuple = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[str] = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
__SCREAMING_SNAKE_CASE : str = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(a__ , a__ )
@slow
@require_torch
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(a__ )
@slow
@require_tf
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Any = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(a__ )
def a_ ( self , a__ ):
__SCREAMING_SNAKE_CASE : Dict = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(a__ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
__SCREAMING_SNAKE_CASE : Any = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(a__ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
__SCREAMING_SNAKE_CASE : Optional[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(a__ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : str = None
self.run_pipeline_test(a__ , [] )
@require_tf
def a_ ( self ):
__SCREAMING_SNAKE_CASE : int = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : Any = None
self.run_pipeline_test(a__ , [] )
def a_ ( self , a__ , a__ , a__ ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
__SCREAMING_SNAKE_CASE : List[Any] = FillMaskPipeline(model=a__ , tokenizer=a__ )
__SCREAMING_SNAKE_CASE : int = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def a_ ( self , a__ , a__ ):
__SCREAMING_SNAKE_CASE : List[str] = fill_masker.tokenizer
__SCREAMING_SNAKE_CASE : str = fill_masker.model
__SCREAMING_SNAKE_CASE : Optional[Any] = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
a__ , [
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
] , )
__SCREAMING_SNAKE_CASE : str = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
a__ , [
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
] , )
__SCREAMING_SNAKE_CASE : str = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
a__ , [
[
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
],
[
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
],
] , )
with self.assertRaises(a__ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a__ ):
fill_masker("This is" )
self.run_test_top_k(a__ , a__ )
self.run_test_targets(a__ , a__ )
self.run_test_top_k_targets(a__ , a__ )
self.fill_mask_with_duplicate_targets_and_top_k(a__ , a__ )
self.fill_mask_with_multiple_masks(a__ , a__ )
def a_ ( self , a__ , a__ ):
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.get_vocab()
__SCREAMING_SNAKE_CASE : Dict = sorted(vocab.keys() )[:2]
# Pipeline argument
__SCREAMING_SNAKE_CASE : Any = FillMaskPipeline(model=a__ , tokenizer=a__ , targets=a__ )
__SCREAMING_SNAKE_CASE : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
a__ , [
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
] , )
__SCREAMING_SNAKE_CASE : Any = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a__ ) )
# Call argument
__SCREAMING_SNAKE_CASE : Any = FillMaskPipeline(model=a__ , tokenizer=a__ )
__SCREAMING_SNAKE_CASE : int = fill_masker(f'This is a {tokenizer.mask_token}' , targets=a__ )
self.assertEqual(
a__ , [
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
] , )
__SCREAMING_SNAKE_CASE : Optional[Any] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , a__ )
__SCREAMING_SNAKE_CASE : List[str] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(a__ ) )
# Score equivalence
__SCREAMING_SNAKE_CASE : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=a__ )
__SCREAMING_SNAKE_CASE : Any = [top_mask["token_str"] for top_mask in outputs]
__SCREAMING_SNAKE_CASE : Dict = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a__ ) == set(a__ ):
__SCREAMING_SNAKE_CASE : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=a__ )
__SCREAMING_SNAKE_CASE : Any = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
# Raises with invalid
with self.assertRaises(a__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a__ ):
__SCREAMING_SNAKE_CASE : List[Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""] )
with self.assertRaises(a__ ):
__SCREAMING_SNAKE_CASE : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets="" )
def a_ ( self , a__ , a__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = FillMaskPipeline(model=a__ , tokenizer=a__ , top_k=2 )
__SCREAMING_SNAKE_CASE : Any = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
a__ , [
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
] , )
__SCREAMING_SNAKE_CASE : str = FillMaskPipeline(model=a__ , tokenizer=a__ )
__SCREAMING_SNAKE_CASE : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
a__ , [
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
] , )
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
def a_ ( self , a__ , a__ ):
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()
__SCREAMING_SNAKE_CASE : Any = FillMaskPipeline(model=a__ , tokenizer=a__ )
# top_k=2, ntargets=3
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(vocab.keys() )[:3]
__SCREAMING_SNAKE_CASE : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=a__ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        __SCREAMING_SNAKE_CASE : Optional[int] = [el["token_str"] for el in sorted(a__ , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a__ ).issubset(a__ ):
__SCREAMING_SNAKE_CASE : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=a__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
def a_ ( self , a__ , a__ ):
__SCREAMING_SNAKE_CASE : List[str] = FillMaskPipeline(model=a__ , tokenizer=a__ )
__SCREAMING_SNAKE_CASE : str = tokenizer.get_vocab()
# String duplicates + id duplicates
__SCREAMING_SNAKE_CASE : Any = sorted(vocab.keys() )[:3]
__SCREAMING_SNAKE_CASE : Union[str, Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__SCREAMING_SNAKE_CASE : Any = fill_masker(f'My name is {tokenizer.mask_token}' , targets=a__ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(a__ ) , 3 )
def a_ ( self , a__ , a__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = FillMaskPipeline(model=a__ , tokenizer=a__ )
__SCREAMING_SNAKE_CASE : List[str] = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
a__ , [
[
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
],
[
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
],
[
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
{"sequence": ANY(a__ ), "score": ANY(a__ ), "token": ANY(a__ ), "token_str": ANY(a__ )},
],
] , )
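    # The behaviours exercised above map onto this user-facing API (model name and
    # output values illustrative):
    #
    #     unmasker = pipeline("fill-mask", model="distilroberta-base")
    #     unmasker("Paris is the <mask> of France.", top_k=2, targets=[" capital"])
    #     # -> list of {"sequence", "score", "token", "token_str"} dicts per candidate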
| 564 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowercase = logging.get_logger(__name__)
_CONFIG_FOR_DOC = '''T5Config'''
class TFMTaModel ( TFTaModel ):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MTaConfig
class TFMTaForConditionalGeneration ( TFTaForConditionalGeneration ):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MTaConfig
class TFMTaEncoderModel ( TFTaEncoderModel ):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MTaConfig
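# These classes are thin MT5 wrappers over the corresponding T5 TF implementations;
# typical use (checkpoint name illustrative):
#   model = TFMTaForConditionalGeneration.from_pretrained("google/mt5-small")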
| 564 | 1 |
"""simple docstring"""
def or_gate(input_a: int, input_b: int ) -> int:
    '''simple docstring'''
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate() -> None:
    '''simple docstring'''
    assert or_gate(0, 0 ) == 0
    assert or_gate(0, 1 ) == 1
    assert or_gate(1, 0 ) == 1
    assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 535 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__lowerCamelCase :int = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
__lowerCamelCase :List[Any] = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
__lowerCamelCase :List[Any] = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
__lowerCamelCase :Any = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
__lowerCamelCase :Any = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key( k : Optional[Any] , patterns : Tuple ) -> Optional[int]:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
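# Worked example (assumed TF name): with DECODER_PATTERNS applied in order,
#   "pegasus/decoder/layer_0/self_attn/kernel" -> "model.decoder.layers.0.self_attn.weight"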
def convert_bigbird_pegasus( tf_weights : dict , config_update : dict ) -> BigBirdPegasusForConditionalGeneration:
    config = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(config )
lowerCamelCase : List[Any] = torch_model.state_dict()
lowerCamelCase : Union[str, Any] = {}
# separating decoder weights
lowerCamelCase : str = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
lowerCamelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
lowerCamelCase : int = [k.endswith(UpperCamelCase__ ) for ending in KEYS_TO_IGNORE]
if any(UpperCamelCase__ ):
continue
lowerCamelCase : Optional[int] = DECODER_PATTERNS
lowerCamelCase : Union[str, Any] = rename_state_dict_key(UpperCamelCase__ , UpperCamelCase__ )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
lowerCamelCase : Optional[int] = v.T
lowerCamelCase : Tuple = torch.from_numpy(UpperCamelCase__ )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
lowerCamelCase : List[Any] = [k.endswith(UpperCamelCase__ ) for ending in KEYS_TO_IGNORE]
if any(UpperCamelCase__ ):
continue
lowerCamelCase : List[str] = REMAINING_PATTERNS
lowerCamelCase : Optional[Any] = rename_state_dict_key(UpperCamelCase__ , UpperCamelCase__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
lowerCamelCase : Optional[Any] = v.T
lowerCamelCase : List[str] = torch.from_numpy(UpperCamelCase__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
lowerCamelCase : List[Any] = mapping["""model.embed_positions.weight"""]
lowerCamelCase : Tuple = mapping.pop("""model.embed_positions.weight""" )
    missing , extra = torch_model.load_state_dict(mapping , strict=False )
lowerCamelCase : int = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def get_tf_weights_as_numpy( path : Optional[int] ) -> Dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["""global_step"""]
    for name, shape in tqdm(init_vars , desc="""converting tf checkpoint to dict""" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path : str , save_dir : str , config_update : dict ) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
__lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Tuple = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
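    # Example invocation (script filename hypothetical; flags as declared above):
    #   python convert_bigbird_pegasus_tf_to_pytorch.py --tf_ckpt_path ./tf_ckpt --save_dir ./bigbird-pegasus-hf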
| 222 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__A = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__A = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__A = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute( self , predictions , references ) -> Any:
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 711 |
def mf_knapsack( i , wt , val , j ):
    '''simple docstring'''
    global f # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
def knapsack( w , wt , val , n ):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution( w , wt , val ):
    '''simple docstring'''
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            '''Both the weights and values vectors must be either lists or tuples''' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            '''The number of weights must be the same as the number of values.\n'''
            f"""But got {num_items} weights and {len(val )} values"""
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                '''All weights must be integers but got weight of '''
                f"""type {type(wt[i] )} at index {i}"""
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution( dp , wt , i , j , optimal_set ):
    '''simple docstring'''
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
__A = [3, 2, 4, 4]
__A = [4, 3, 2, 3]
__A = 4
__A = 6
__A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
__A , __A = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
__A , __A = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 62 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , )->None:
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 30}
        crop_size = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self )->Dict:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
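# crop_pct above follows the timm sizing convention (a sketch of the assumed math):
# resize the shortest edge to int(size / crop_pct), then center-crop to crop_size,
# e.g. 30 / 0.9 -> resize shortest edge to 33, then crop 30x30.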
@require_torch
@require_vision
class PoolFormerImageProcessingTest ( ImageProcessingSavingTestMixin ,unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self )->None:
        '''simple docstring'''
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
    def image_processor_dict( self )->int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : List[Any] )->int:
'''simple docstring'''
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """crop_pct""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """image_std""" ) )
def UpperCAmelCase__ ( self : str )->Any:
'''simple docstring'''
__lowerCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
__lowerCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def UpperCAmelCase__ ( self : Dict )->int:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Any )->Tuple:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
__lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase__ ( self : List[Any] )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase : int = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase__ ( self : Union[str, Any] )->Tuple:
'''simple docstring'''
__lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
__lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
            ) , )
| 504 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
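# Worked example of the remapping above (added for illustration, not part of the
# original script): after the "_orig_mod." prefix is stripped during checkpoint
# fixup below, the replacements are applied in order, e.g.
#   "transformer.h.0.ln_1.weight"        -> "layers.0.layernorm_1.weight"
#   "transformer.h.0.attn.c_attn.weight" -> "layers.0.attn.att_proj.weight"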
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('''~'''), '''.cache''')
CACHE_DIR = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def _get_ckpt_path(model_type, use_small=False) -> str:
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )
def _download(from_hf_path, file_name) -> None:
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_path , filename=file_name , local_dir=CACHE_DIR )
def SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case=False , snake_case="text" ) -> List[Any]:
if model_type == "text":
__lowercase = BarkSemanticModel
__lowercase = BarkSemanticConfig
__lowercase = BarkSemanticGenerationConfig
elif model_type == "coarse":
__lowercase = BarkCoarseModel
__lowercase = BarkCoarseConfig
__lowercase = BarkCoarseGenerationConfig
elif model_type == "fine":
__lowercase = BarkFineModel
__lowercase = BarkFineConfig
__lowercase = BarkFineGenerationConfig
else:
raise NotImplementedError()
__lowercase = F"{model_type}_small" if use_small else model_type
__lowercase = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(snake_case ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info['repo_id'] , model_info['file_name'] )
__lowercase = torch.load(snake_case , map_location=snake_case )
# this is a hack
__lowercase = checkpoint['model_args']
if "input_vocab_size" not in model_args:
__lowercase = model_args['vocab_size']
__lowercase = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__lowercase = model_args.pop('n_head' )
__lowercase = model_args.pop('n_embd' )
__lowercase = model_args.pop('n_layer' )
__lowercase = ConfigClass(**checkpoint['model_args'] )
__lowercase = ModelClass(config=snake_case )
__lowercase = GenerationConfigClass()
__lowercase = model_generation_config
__lowercase = checkpoint['model']
# fixup checkpoint
__lowercase = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(snake_case ):
# replace part of the key with corresponding layer name in HF implementation
__lowercase = k[len(snake_case ) :]
for old_layer_name in new_layer_name_dict:
__lowercase = new_k.replace(snake_case , new_layer_name_dict[old_layer_name] )
__lowercase = state_dict.pop(snake_case )
__lowercase = set(state_dict.keys() ) - set(model.state_dict().keys() )
__lowercase = {k for k in extra_keys if not k.endswith('.attn.bias' )}
__lowercase = set(model.state_dict().keys() ) - set(state_dict.keys() )
__lowercase = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(snake_case ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(snake_case ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(snake_case , strict=snake_case )
__lowercase = model.num_parameters(exclude_embeddings=snake_case )
__lowercase = checkpoint['best_val_loss'].item()
logger.info(F"model loaded: {round(n_params/1E6 , 1 )}M params, {round(snake_case , 3 )} loss" )
model.eval()
model.to(snake_case )
del checkpoint, state_dict
return model
def SCREAMING_SNAKE_CASE ( snake_case , snake_case=False , snake_case="text" ) -> Tuple:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__lowercase = 'cpu' # do conversion on cpu
__lowercase = _get_ckpt_path(snake_case , use_small=snake_case )
__lowercase = _load_model(snake_case , snake_case , model_type=snake_case , use_small=snake_case )
# load bark initial model
__lowercase = _bark_load_model(snake_case , 'cpu' , model_type=snake_case , use_small=snake_case )
if model_type == "text":
__lowercase = bark_model['model']
if model.num_parameters(exclude_embeddings=snake_case ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
__lowercase = 5
__lowercase = 10
if model_type in ["text", "coarse"]:
__lowercase = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
__lowercase = bark_model(snake_case )[0]
__lowercase = model(snake_case )
# take last logits
__lowercase = output_new_model_total.logits[:, [-1], :]
else:
__lowercase = 3
__lowercase = 8
__lowercase = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__lowercase = model(snake_case , snake_case )
__lowercase = bark_model(snake_case , snake_case )
__lowercase = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(snake_case ).mkdir(exist_ok=snake_case )
model.save_pretrained(snake_case )
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path, ):
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , 'config.json' ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , 'config.json' ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , 'config.json' ) )
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz' )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 375 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
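#
# The core pattern used below, in isolation (a sketch that assumes `model`,
# `optimizer` and `train_dataloader` have already been passed through
# `accelerator.prepare`):
#
#   with LocalSGD(accelerator=accelerator, model=model,
#                 local_sgd_steps=8, enabled=True) as local_sgd:
#       for batch in train_dataloader:
#           with accelerator.accumulate(model):
#               loss = model(**batch).loss
#               accelerator.backward(loss)
#               optimizer.step()
#               optimizer.zero_grad()
#               local_sgd.step()  # parameters are synchronized every `local_sgd_steps` batches
#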
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the glue/mrpc dataset, using "bert-base-cased" as the tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels', which is the name the models of the
    # transformers library expect
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
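# Worked example of the padding rule in `collate_fn` above: with
# pad_to_multiple_of=8, a batch whose longest sequence has 37 tokens is padded to
# 40 (the next multiple of 8), which keeps tensor shapes friendly to fp16/bf16 kernels.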
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    local_sgd_steps = int(args.local_sgd_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        with LocalSGD(
            accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model ):
                    output = model(**batch )
                    loss = output.loss
                    accelerator.backward(loss )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=int , default=1 , help="""The number of minibatches to be run before gradients are accumulated.""" , )
    parser.add_argument(
        """--local_sgd_steps""" , type=int , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 717 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Project Euler 63: count the n-digit positive integers that are also an nth power.
    For example, 16807 = 7**5 is itself a five-digit number.

    >>> solution(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 74 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_a = 1
_a = 3
_a = (32, 32)
_a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
return image
@property
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCAmelCase_ )
@property
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
def extract(*lowerCAmelCase_ : str , **lowerCAmelCase_ : List[Any] ):
class A :
def __init__( self : Dict ) -> int:
"""simple docstring"""
_a = torch.ones([0] )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.pixel_values.to(lowerCAmelCase_ )
return self
return Out()
return extract
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet
_a = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionPipeline(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
_a = output.images
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=lowerCAmelCase_ , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet
_a = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionPipeline(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
_a = output.images
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=lowerCAmelCase_ , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
_a = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert isinstance(pipe.scheduler , lowerCAmelCase_ )
assert pipe.safety_checker is None
_a = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
_a = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_a = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_a = self.dummy_cond_unet
_a = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
_a = unet.half()
_a = vae.half()
_a = bert.half()
# make sure here that pndm scheduler skips prk
_a = StableDiffusionPipeline(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = '''A painting of a squirrel eating a burger'''
_a = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_a = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase_ )
_a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
_a = 40_03_66_03_46
_a = 7
# without safety guidance (sld_guidance_scale = 0)
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
_a = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase_ )
_a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = '''padme amidala taking a bath artwork, safe for work, no nudity'''
_a = 27_34_97_17_55
_a = 7
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
_a = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
_a = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
_a = 10_44_35_52_34
_a = 12
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_a = torch.manual_seed(lowerCAmelCase_ )
_a = sd_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a = output.images
_a = image[0, -3:, -3:, -1]
_a = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 22 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase ="""▁"""
__UpperCAmelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class BertGenerationTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self ):
'''simple docstring'''
A__ = "<s>"
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(UpperCamelCase__ ) , 10_02 )
def lowercase_ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def lowercase_ ( self ):
'''simple docstring'''
A__ = BertGenerationTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
A__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [2_85, 46, 10, 1_70, 3_82] , )
A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
A__ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
A__ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
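        # Note (added for clarity): on this round trip the out-of-vocabulary pieces
        # ("9" and "é") come back as "<unk>" because they were mapped to id 0 above.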
@cached_property
def lowercase_ ( self ):
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = "Hello World!"
A__ = [1_85_36, 22_60, 1_01]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
A__ = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@require_torch
@slow
def lowercase_ ( self ):
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
A__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
A__ = " ".join(UpperCamelCase__ )
A__ = self.big_tokenizer.encode_plus(UpperCamelCase__ , return_tensors="pt" , return_token_type_ids=UpperCamelCase__ )
A__ = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=UpperCamelCase__ )
A__ = BertGenerationConfig()
A__ = BertGenerationEncoder(UpperCamelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCamelCase__ )
model(**UpperCamelCase__ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , ) | 337 | 0 |
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    '''
    Return True if the decimal digits of ``num`` read the same forwards and backwards.

    >>> is_palindrome(12321)
    True
    >>> is_palindrome(1234)
    False
    >>> is_palindrome(-121)
    False
    '''
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 1_0 + (num % 1_0)
        num //= 1_0
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_efficientformer'''] = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_efficientformer'''] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_efficientformer'''] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
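# Illustration of what the lazy pattern above buys (a sketch, not part of the
# original file): importing the package stays cheap, and the heavy torch/TF modules
# are only imported when a symbol is first accessed, e.g.
#   from transformers.models.efficientformer import EfficientFormerConfig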
| 681 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''nielsr/canine-s''': 20_48,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_11_41_12
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE_000
SEP = 0xE_001
BOS = 0xE_002
MASK = 0xE_003
RESERVED = 0xE_004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
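# Worked example of the two maps above: SPECIAL_CODEPOINTS[0xE_000] == "[CLS]" and
# SPECIAL_CODEPOINTS_BY_NAME["[CLS]"] == 0xE_000. Ordinary characters simply use
# their Unicode codepoint as their id, e.g. ord("h") == 104, so no vocab file is needed.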
class CanineTokenizer( PreTrainedTokenizer ):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2048 , **kwargs , ) -> None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size(self ) -> int:
        return self._unicode_vocab_size
    def _tokenize(self , text: str ) -> List[str]:
        return list(text )
    def _convert_token_to_id(self , token: str ) -> int:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )
    def _convert_id_to_token(self , index: int ) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"invalid id: {index}" )
    def convert_tokens_to_string(self , tokens ) -> str:
        return "".join(tokens )
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        return () | 691 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool( PipelineTool ):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq
    inputs = ["image"]
    outputs = ["text"]
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,['''vision'''] )
        super().__init__(*args ,**kwargs )
    def encode( self ,image: "Image" ):
        return self.pre_processor(images=image ,return_tensors='''pt''' )
    def forward( self ,inputs ):
        return self.model.generate(**inputs )
    def decode( self ,outputs ):
        return self.pre_processor.batch_decode(outputs ,skip_special_tokens=True )[0].strip()
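# Minimal usage sketch (an assumption based on the tool API above, not part of the
# original file):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))  # runs encode -> forward -> decode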
| 450 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
    },
    'merges_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/bart-base': 1_0_2_4,
    'facebook/bart-large': 1_0_2_4,
    'facebook/bart-large-mnli': 1_0_2_4,
    'facebook/bart-large-cnn': 1_0_2_4,
    'facebook/bart-large-xsum': 1_0_2_4,
    'yjernite/bart_eli5': 1_0_2_4,
}
class BartTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
    @property
    def mask_token(self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self , value ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = value
    def _batch_encode_plus(self , *args , **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args , **kwargs)
    def _encode_plus(self , *args , **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args , **kwargs)
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None) -> List[int]:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
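    # Example of the format produced above (added for clarity): a single sequence
    # becomes `<s> A </s>` and a pair becomes `<s> A </s></s> B </s>`, matching
    # BART's pretraining input format.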
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] | 708 |
'''simple docstring'''
import math
class Graph:
    def __init__(self , n=0) -> None: # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n)] for i in range(0 , n)
        ] # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n)] for i in range(0 , n)
        ] # dp[i][j] stores minimum distance from i to j
    def add_edge(self , u , v , w) -> None:
        self.dp[u][v] = w
    def floyd_warshall(self) -> None:
        # O(n^3) relaxation: allow each node k in turn as an intermediate hop
        for k in range(0 , self.n):
            for i in range(0 , self.n):
                for j in range(0 , self.n):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
    def show_min(self , u , v):
        return self.dp[u][v]
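# Note (added for clarity): dp[i][i] is never initialised to 0, so show_min(i, i)
# reports the cheapest cycle through i (or inf) rather than 0; the demo below only
# queries distinct node pairs, for which dp holds true shortest-path costs.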
if __name__ == "__main__":
__A = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3) | 61 | 0 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCamelCase_ = object()
# For specifying empty leaf dict `{}`
lowerCamelCase_ = object()
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : Dict = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(A__ ) - len(A__ ) + 1 ):
UpperCAmelCase_ : str = [x.match(A__ ) for x, y in zip(A__ ,ks[i:] )]
if matches and all(A__ ):
return True
return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
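# Usage sketch (hypothetical parameter tree; leaf key names must follow the rules above):
#   params = {"transformer": {"wte": {"embedding": jnp.zeros((50257, 768))}}}
#   spec = set_partitions(params)  # frozen dict mapping each leaf to a PartitionSpec (or None)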
from manim import *


class Stage5(Scene):
    def construct(self):
        # Building blocks: a full-size memory cell, a small "meta" cell, and a fill overlay
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU: two columns of six memory cells
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: a single column of four cells
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        # Model: a row of six cells
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []  # model_arr and model_meta_arr stay empty in this stage

        # Mark the model cells as "empty" (yellow) and lay down matching slots in CPU memory
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        # Checkpoint: six blue cells plus copies resting in CPU memory
        ckpt_base = [mem.copy() for i in range(6)]
        ckpt_rect = VGroup(*ckpt_base).arrange(RIGHT, buff=0)
        ckpt_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(ckpt_rect, ckpt_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []
        for i, rect in enumerate(ckpt_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)
            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        # Legend
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=18
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        # Disk: two columns of six small "meta" cells
        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        # Move the checkpoint copies from CPU memory onto disk
        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))
        step_2 = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""", font_size=24)
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(checkpoint, step_2, *ckpt_arr, *ckpt_cpu_arr),
        )
        self.wait()
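# To render this scene with Manim Community (the file name here is hypothetical):
#   manim -pql stage_5.py Stage5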
"""Fine-tune a Transformers model for audio classification (e.g. keyword spotting)."""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
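# Example: clip a 10 s waveform at 16 kHz down to a random 5 s window.
#   wav = np.zeros(160000)
#   clip = random_subsample(wav, max_length=5.0)  # len(clip) == 80000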
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # Parse arguments: either from a single JSON file or from the command line.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms (random subsampling) across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms (no subsampling) across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
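# Example invocation (dataset and output names are illustrative; any audio
# classification dataset exposing an `audio` and a `label` column should work):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval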
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # Build a 64x64 init image and a shifted copy to serve as the inpainting mask.
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
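# The integration tests above only run when a CUDA GPU is available (`@require_torch_gpu`)
# and slow tests are enabled, e.g. `RUN_SLOW=1 pytest tests/.../test_stable_diffusion_inpaint.py`
# (the exact test-file path here is illustrative).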