from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
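# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original init):
# after the _LazyModule swap above, the heavy tokenizer module is imported only
# on first attribute access, e.g.:
#
#   from transformers.models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
#   tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained(
#       "facebook/wav2vec2-lv-60-espeak-cv-ft"  # example checkpoint name
#   )
# ---------------------------------------------------------------------------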
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowerCAmelCase__ ) , version.parse(lowerCAmelCase__ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> None:
__a = f'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' , lowerCAmelCase__ ):
__a , __a , __a = requirement, None, None
else:
__a = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
f''' got {requirement}''' )
__a , __a = match[0]
__a = want_full.split(''',''' ) # there could be multiple requirements
__a = {}
for w in want_range:
__a = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
f''' but got {requirement}''' )
__a , __a = match[0]
__a = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__a = '''.'''.join([str(lowerCAmelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return
# check if any version is installed
try:
__a = importlib.metadata.version(lowerCAmelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Optional[Any]:
__a = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowerCAmelCase__ , lowerCAmelCase__ )
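# Hedged usage sketch (illustrative requirement strings, not pins from this repo):
if __name__ == "__main__":
    require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")  # range with several constraints
    require_version("python>=3.7.0")  # special-cased against sys.version_info
    require_version_core("tqdm")  # presence-only check, with the core-specific hint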
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
"""simple docstring"""
from __future__ import annotations
lowercase_ = list[tuple[int, int]]
lowercase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , ):
__a = pos_x
__a = pos_y
__a = (pos_y, pos_x)
__a = goal_x
__a = goal_y
__a = g_cost
__a = parent
__a = self.calculate_heuristic()
def __UpperCAmelCase ( self ):
__a = abs(self.pos_x - self.goal_x )
__a = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _a ):
return self.f_cost < other.f_cost
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a ):
__a = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a )
__a = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , _a )
__a = [self.start]
__a = []
__a = False
def __UpperCAmelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__a = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__a = True
return self.retrace_path(_a )
self.closed_nodes.append(_a )
__a = self.get_successors(_a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_a )
else:
# retrieve the best current path
__a = self.open_nodes.pop(self.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_a )
else:
self.open_nodes.append(_a )
if not self.reached:
return [self.start.pos]
return None
def __UpperCAmelCase ( self , _a ):
__a = []
for action in delta:
__a = parent.pos_x + action[1]
__a = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) )
return successors
def __UpperCAmelCase ( self , _a ):
__a = node
__a = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__a = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowercase_ = (0, 0)
lowercase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
lowercase_ = GreedyBestFirst(init, goal)
lowercase_ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase_ = 2
for elem in grid:
print(elem)
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowerCamelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : int = 101 ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = length
def __len__( self : int ) -> Union[str, Any]:
'''simple docstring'''
return self.length
def __getitem__( self : int , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
return i
class lowercase__ :
'''simple docstring'''
def __call__( self : int , _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
return {"input_ids": torch.tensor(_UpperCAmelCase ), "labels": torch.tensor(_UpperCAmelCase )}
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ) -> int:
'''simple docstring'''
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCAmelCase_ = nn.Linear(120 , 80 )
def lowercase__ ( self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any]=None ) -> Any:
'''simple docstring'''
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch_neuroncore
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = F"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = F"""--output_dir {output_dir}""".split()
UpperCAmelCase_ = ["torchrun"] + distributed_args + args
execute_subprocess_async(_UpperCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch_multi_gpu
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = F"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = F"""--output_dir {output_dir}""".split()
UpperCAmelCase_ = ["torchrun"] + distributed_args + args
execute_subprocess_async(_UpperCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowerCamelCase = HfArgumentParser((TrainingArguments,))
lowerCamelCase = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
lowerCamelCase = DummyDataset(dataset_length)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = list(range(len(lowerCAmelCase__ ) ) )
UpperCAmelCase_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
f"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
lowerCamelCase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowerCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCamelCase = 2
lowerCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCamelCase = None
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str ) -> List[Any]:
# Initialise PyTorch model
__a = RemBertConfig.from_json_file(lowerCAmelCase__ )
print('''Building PyTorch model from configuration: {}'''.format(str(lowerCAmelCase__ ) ) )
__a = RemBertModel(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(lowerCAmelCase__ ) )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase__ = '''docs/source/en/_toctree.yml'''
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
_lowerCamelCase : int = defaultdict(A_ )
for doc in model_doc:
counts[doc["local"]] += 1
_lowerCamelCase : List[Any] = [key for key, value in counts.items() if value > 1]
_lowerCamelCase : List[str] = []
for duplicate_key in duplicates:
_lowerCamelCase : Dict = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(A_ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(A_, key=lambda A_ : s["title"].lower() )
def snake_case_ ( A_ : Union[str, Any]=False ):
'''simple docstring'''
with open(A_, encoding='''utf-8''' ) as f:
_lowerCamelCase : Dict = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCamelCase : List[str] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCamelCase : Any = content[api_idx]['''sections''']
# Then to the model doc
_lowerCamelCase : str = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_lowerCamelCase : List[str] = api_doc[model_idx]['''sections''']
_lowerCamelCase : Tuple = [(idx, section) for idx, section in enumerate(A_ ) if '''sections''' in section]
_lowerCamelCase : Any = False
for idx, modality_doc in modalities_docs:
_lowerCamelCase : str = modality_doc['''sections''']
_lowerCamelCase : List[str] = clean_model_doc_toc(A_ )
if old_modality_doc != new_modality_doc:
_lowerCamelCase : Any = True
if overwrite:
_lowerCamelCase : Optional[int] = new_modality_doc
if diff:
if overwrite:
_lowerCamelCase : Any = model_doc
_lowerCamelCase : Optional[Any] = api_doc
with open(A_, '''w''', encoding='''utf-8''' ) as f:
f.write(yaml.dump(A_, allow_unicode=A_ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCAmelCase__ = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase_ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls ):
__a = TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a , repo_id='''test-model-flax''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_a , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Dict ) -> Optional[int]:
__a = True
__a = flatten_dict(modela.params )
__a = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
__a = False
return models_are_equal
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_a )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) )
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_a )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) , max_shard_size='''10KB''' )
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex

logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
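# Hedged wiring sketch (actor count and checkpoint name are illustrative; the
# actual driver code lives in the accompanying fine-tuning script, not shown here):
if __name__ == "__main__":
    ray.init()
    # Spawn remote retrieval actors from the RayRetriever class defined above.
    workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
    retriever = RagRayDistributedRetriever.from_pretrained(
        "facebook/rag-token-nq",  # example checkpoint
        actor_handles=workers,
    )
    retriever.init_retrieval()  # initializes the index on every worker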
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = DownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
def __UpperCAmelCase ( self ):
__a = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetDownsampleBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = CrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SimpleCrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SkipDownBlockaD # noqa F405
__UpperCAmelCase : Tuple = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = AttnSkipDownBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = DownEncoderBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaD # noqa F405
__UpperCAmelCase : Any = 'mid'
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaDCrossAttn # noqa F405
__UpperCAmelCase : str = 'mid'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = UNetMidBlockaDSimpleCrossAttn # noqa F405
__UpperCAmelCase : List[Any] = 'mid'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpBlockaD # noqa F405
__UpperCAmelCase : Union[str, Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetUpsampleBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = CrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a , include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = AttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = SkipUpBlockaD # noqa F405
__UpperCAmelCase : str = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnSkipUpBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpDecoderBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_a )
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_textdatasetdict_reader_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_textdatasetdict_reader_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_textdatasetdict_reader_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase_ = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = 'maskformer'
__UpperCAmelCase : Optional[int] = {'hidden_size': 'mask_feature_size'}
__UpperCAmelCase : Any = ['resnet', 'swin']
__UpperCAmelCase : Dict = ['detr']
def __init__( self , _a = 256 , _a = 256 , _a = 0.1 , _a = False , _a = None , _a = None , _a = 0.02 , _a = 1.0 , _a = 1.0 , _a = 1.0 , _a = 20.0 , _a = None , **_a , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__a = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(_a , _a ):
__a = backbone_config.pop('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_a )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__a = DetrConfig()
else:
# verify that the decoder is supported
__a = (
decoder_config.pop('''model_type''' ) if isinstance(_a , _a ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(_a , _a ):
__a = CONFIG_MAPPING[decoder_type]
__a = config_class.from_dict(_a )
__a = backbone_config
__a = decoder_config
# main feature dimension for the model
__a = fpn_feature_size
__a = mask_feature_size
# initializer
__a = init_std
__a = init_xavier_std
# Hungarian matcher && loss
__a = cross_entropy_weight
__a = dice_weight
__a = mask_weight
__a = use_auxiliary_loss
__a = no_object_weight
__a = output_auxiliary_logits
__a = self.decoder_config.encoder_attention_heads
__a = self.decoder_config.num_hidden_layers
super().__init__(**_a )
@classmethod
def __UpperCAmelCase ( cls , _a , _a , **_a ):
return cls(
backbone_config=_a , decoder_config=_a , **_a , )
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.decoder_config.to_dict()
__a = self.__class__.model_type
return output
import datasets
from jiwer import compute_measures

_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowercase_ = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def lowercase ( lowerCAmelCase__ : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
__a = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__a = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__a = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance formula |Z| = sqrt(R**2 + X**2): given any two of
    resistance, reactance and impedance (pass 0 for the unknown one), compute the third.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
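# Worked example (added for illustration, using the function restored above on
# a 3-4-5 right triangle of R, X, |Z|):
if __name__ == "__main__":
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
    print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}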
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 'gpt_bigcode'
__UpperCAmelCase : Tuple = ['past_key_values']
__UpperCAmelCase : Dict = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _a=50_257 , _a=1_024 , _a=768 , _a=12 , _a=12 , _a=None , _a="gelu_pytorch_tanh" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1E-5 , _a=0.02 , _a=True , _a=True , _a=50_256 , _a=50_256 , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = n_positions
__a = n_embd
__a = n_layer
__a = n_head
__a = n_inner
__a = activation_function
__a = resid_pdrop
__a = embd_pdrop
__a = attn_pdrop
__a = layer_norm_epsilon
__a = initializer_range
__a = scale_attn_weights
__a = use_cache
__a = attention_softmax_in_fpaa
__a = scale_attention_softmax_in_fpaa
__a = multi_query
__a = bos_token_id
__a = eos_token_id
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
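# Hedged usage sketch (arbitrary toy sizes, not a released checkpoint's settings):
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_layer=6, n_head=8, n_embd=512, multi_query=True)
    print(config.hidden_size)  # 512, resolved through the attribute_map alias for n_embd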
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, holding the (not yet quantized) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.quant_conv(self.encoder(x))
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant_post = self.post_quant_conv(quant)
        dec = self.decoder(quant_post, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
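

# A minimal round-trip sketch (assumes the corrected class above): with the
# default single-block config, spatial dimensions are preserved end to end.
if __name__ == "__main__":
    _vq = VQModel()
    _x = torch.randn(1, 3, 32, 32)
    print(_vq(_x).sample.shape)  # torch.Size([1, 3, 32, 32])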
| 88 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
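
# Launch sketch (the config file and script names here are illustrative; any
# `accelerate config`-generated file with a DeepSpeed section works):
#   accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#       --model_name_or_path bert-base-cased --num_epochs 3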
| 695 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
webbrowser.open(link)
| 89 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
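
    # A tiny worked example on top of the doctests (assumes the corrected
    # names above): two hidden health states explaining three observations.
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "sick"]
    start_p = {"healthy": 0.6, "sick": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # ['healthy', 'healthy', 'sick']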
| 695 | 0 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
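# A hedged launch sketch (the script file name is illustrative; the flags
# match the argument parser defined in main() below):
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16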
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
| 90 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
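
    # For reference (assumes the corrected names above; this takes a while):
    # solution() -> 26241, the spiral side length at which the share of
    # primes on the diagonals first drops below 10% (Project Euler 58).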
| 695 | 0 |
"""simple docstring"""
speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}

speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
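

# A quick usage sketch (assumes the corrected names above):
if __name__ == "__main__":
    print(convert_speed(100, "km/h", "m/s"))  # 27.778
    print(convert_speed(100, "mph", "km/h"))  # 160.934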
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 91 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 695 | 0 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
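
# CLI sketch via python-fire (the file names are illustrative):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin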
| 92 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig):
    """Configuration class for the DPR context encoder, question encoder and reader."""

    model_type = "dpr"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 695 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references this node carries."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a geometric level: keep promoting with probability `p`."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        # Collects the nodes that would need updating if `key` is inserted or deleted.
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
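

# A minimal usage sketch (assumes the corrected classes above):
if __name__ == "__main__":
    _sl = SkipList()
    _sl.insert("Key1", 1)
    _sl.insert("Key2", 2)
    print(_sl.find("Key2"))  # 2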
def test_insert() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search() -> None:
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests() -> None:
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main() -> None:
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 93 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 695 | 0 |
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 94 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1_024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2_000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 695 | 0 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
| 95 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 695 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(self, vocab_size=30_000, embedding_size=128, hidden_size=4_096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16_384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 96 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = BertTokenizer
__UpperCAmelCase : Optional[Any] = BertTokenizerFast
__UpperCAmelCase : str = True
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Any = filter_non_english
def __UpperCAmelCase ( self ):
super().setUp()
__a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self , _a ):
__a = '''UNwant\u00E9d,running'''
__a = '''unwanted, running'''
return input_text, output_text
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class(self.vocab_file )
__a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
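# Illustration (standalone sketch, not the library's actual code): the WordPiece
# behaviour exercised above can be reproduced with a tiny greedy
# longest-match-first implementation. `_toy_wordpiece` is a made-up helper name.
def _toy_wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:  # shrink the candidate until it is in the vocab
            candidate = ("##" if start > 0 else "") + word[start:end]
            if candidate in vocab:
                cur = candidate
                break
            end -= 1
        if cur is None:
            return [unk]  # any unmatchable span poisons the whole word
        pieces.append(cur)
        start = end
    return pieces


assert _toy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert _toy_wordpiece("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]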
| 695 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
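# Usage note (hedged sketch): in recent transformers versions the same result can be
# obtained with the fill-mask pipeline, which wraps the encode/softmax/topk steps
# implemented manually above. This is an assumed-equivalent convenience API, shown
# only for contrast; it is not part of the original script.
def _example_fill_mask_pipeline():
    from transformers import pipeline

    fill = pipeline("fill-mask", model="camembert-base")
    # returns a list of dicts with "sequence", "score" and "token_str" keys
    return fill("Le camembert est <mask> :)", top_k=3)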
| 97 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
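# Worked example (illustrative numbers only, using the functions defined above):
# with a principal of 10_000 and a 0.5% daily rate, 10 days of simple interest is
# 10_000 * 0.005 * 10 = 500.0, while compounding the same rate over 10 periods
# yields 10_000 * ((1 + 0.005) ** 10 - 1) ≈ 511.40 — slightly more, because each
# period earns interest on the interest accrued so far.
def _interest_demo():
    flat = simple_interest(10_000, 0.005, 10)
    compounded = compound_interest(10_000, 0.005, 10)
    assert flat == 500.0
    assert compounded > flat
    return flat, compounded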
| 695 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format `<symbol0> <count0>` per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {"le@@": 5, "tt@@": 6, "er": 7} => {"le": 5, "tt": 6, "er</w>": 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
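# Illustration (toy run of the helper defined above): rewrite_dict_keys() converts
# fairseq-style continuation markers ("@@" suffix) into BioGPT-style end-of-word
# markers ("</w>" suffix), while restoring the four special tokens untouched.
def _rewrite_dict_keys_demo():
    d = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 4, "tt@@": 5, "er": 6}
    out = rewrite_dict_keys(d)
    # word-internal pieces lose "@@", word-final pieces gain "</w>"
    assert out == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 4, "tt": 5, "er</w>": 6}
    return out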
| 98 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Finds the Jaccard similarity between two sets, i.e. intersection over union."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
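# Quick checks (mirror the function above with worked numbers):
def _jaccard_demo():
    a, b = {"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}
    # |a ∩ b| = 3 and |a ∪ b| = 8, so the similarity is 3 / 8 = 0.375
    assert jaccard_similarity(a, b) == 0.375
    # with alternative_union=True the denominator is |a| + |b| = 11 instead
    assert jaccard_similarity(a, b, alternative_union=True) == 3 / 11
    return jaccard_similarity(a, b)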
| 695 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
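# Pattern note (standalone sketch, not part of the module): the guard above is the
# standard transformers idiom for optional backends — probe the dependency once and
# only register the symbol when it is available. A minimal version of the same idea:
def _optional_import(module_name):
    import importlib

    try:
        return importlib.import_module(module_name)
    except ImportError:
        return None  # caller can degrade gracefully instead of crashing at import time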
| 99 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
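# Offline illustration (no network call): the markdown step is plain string
# formatting over the story dicts returned by the API; with canned data it
# behaves like this.
def _markdown_demo():
    stories = [{"title": "Example story", "url": "https://example.com", "score": 1}]
    rendered = "\n".join("* [{title}]({url})".format(**story) for story in stories)
    assert rendered == "* [Example story](https://example.com)"
    return rendered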
| 695 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 10_24,
"""gpt2-medium""": 10_24,
"""gpt2-large""": 10_24,
"""gpt2-xl""": 10_24,
"""distilgpt2""": 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" GPT-2 tokenizer, backed by HuggingFace's tokenizers library (byte-level BPE)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
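# Usage sketch (hedged; assumes the published "gpt2" checkpoint is available):
# byte-level BPE treats a leading space as part of the token, which is why
# `add_prefix_space` matters for pre-tokenized input.
def _prefix_space_demo():
    from transformers import GPT2TokenizerFast

    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    tok_prefixed = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
    # "world" mid-sentence is usually encoded as " world", a different token than
    # the unprefixed "world" that starts a string.
    return tok.tokenize("world"), tok_prefixed.tokenize("world")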
| 100 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
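# Usage sketch (hedged; mirrors the composition helper above, assuming the public
# transformers classes of the same names):
def _blip2_config_demo():
    from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

    vision = Blip2VisionConfig()
    qformer = Blip2QFormerConfig()
    text = OPTConfig()
    config = Blip2Config.from_vision_qformer_text_configs(vision, qformer, text, num_query_tokens=32)
    # the Q-Former is wired to read the vision tower's hidden states
    assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size
    return config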
| 695 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
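# Usage sketch (uses the class defined above): for super-resolution, `upscale` is
# the output/input size ratio — a 64x64 input with upscale=2 is reconstructed at
# 128x128 by the pixel-shuffle upsampler.
def _swin2sr_config_demo():
    config = Swin2SRConfig(image_size=64, upscale=2)
    out_size = config.image_size * config.upscale
    assert out_size == 128
    return config, out_size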
| 101 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
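# Usage sketch (hedged; constructor details assumed from the OnnxConfig base class):
# with type_vocab_size=0 — the DeBERTa-v2 default above — the exported ONNX graph
# takes no token_type_ids, and setting it > 0 adds them back.
def _deberta_onnx_inputs_demo():
    config = DebertaV2Config(type_vocab_size=0)
    onnx_config = DebertaV2OnnxConfig(config)
    return list(onnx_config.inputs.keys())  # expected: ["input_ids", "attention_mask"]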
| 695 | 0 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 102 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowerCAmelCase__ ) , version.parse(lowerCAmelCase__ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> None:
__a = f'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' , lowerCAmelCase__ ):
__a , __a , __a = requirement, None, None
else:
__a = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
f''' got {requirement}''' )
__a , __a = match[0]
__a = want_full.split(''',''' ) # there could be multiple requirements
__a = {}
for w in want_range:
__a = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
f''' but got {requirement}''' )
__a , __a = match[0]
__a = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__a = '''.'''.join([str(lowerCAmelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return
# check if any version is installed
try:
__a = importlib.metadata.version(lowerCAmelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Optional[Any]:
__a = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowerCAmelCase__ , lowerCAmelCase__ )
| 695 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 103 |
"""simple docstring"""
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self):
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other):
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    # pass goal coordinates in (x, y) order to match the constructor
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
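# Minimal sketch (illustrative): the Manhattan heuristic above never overestimates the true
# path cost on a 4-connected grid; e.g. for a node at (0, 0) aiming for (6, 6):
#
#   node = Node(pos_x=0, pos_y=0, goal_x=6, goal_y=6, g_cost=0, parent=None)
#   assert node.calculate_heuristic() == 12  # |0 - 6| + |0 - 6|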
| 695 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
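# Illustrative note (not part of the module): with the _LazyModule indirection above, heavy
# submodules are imported only on first attribute access, e.g.
#
#   from transformers import MobileNetV2Config  # resolves lazily through this module
#   config = MobileNetV2Config()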
| 104 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
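# Example invocation (illustrative; the paths below are placeholders):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin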
| 695 | 0 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCamelCase__ : str = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")


def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
UpperCamelCase__ : str = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
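# Example invocation (illustrative; the shortcut names and output path are placeholders):
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-cased \
#       --config_file bert-base-cased \
#       --tf_dump_path ./tf_models \
#       --compare_with_pt_model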
| 105 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase_ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
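# Minimal sketch (illustrative; the repo id is hypothetical): the subfolder layout exercised
# above matches checkpoints stored below the repo root:
#
#   model = FlaxBertModel.from_pretrained("some-org/some-repo", subfolder="bert")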
| 695 | 0 |
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
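# Minimal usage sketch (illustrative, not part of the original file):
#
#   config = BertAbsConfig(dec_layers=8)
#   assert config.enc_hidden_size == 512 and config.dec_layers == 8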
| 106 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = DownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
def __UpperCAmelCase ( self ):
__a = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetDownsampleBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = CrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SimpleCrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SkipDownBlockaD # noqa F405
__UpperCAmelCase : Tuple = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = AttnSkipDownBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = DownEncoderBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaD # noqa F405
__UpperCAmelCase : Any = 'mid'
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaDCrossAttn # noqa F405
__UpperCAmelCase : str = 'mid'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = UNetMidBlockaDSimpleCrossAttn # noqa F405
__UpperCAmelCase : List[Any] = 'mid'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpBlockaD # noqa F405
__UpperCAmelCase : Union[str, Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetUpsampleBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = CrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a , include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = AttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = SkipUpBlockaD # noqa F405
__UpperCAmelCase : str = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnSkipUpBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpDecoderBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_a )
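# Editorial sketch (assumed behavior of UNetBlockTesterMixin, which is not shown in this
# file): each tester above pins a block class and compares a small slice of the block's
# output against the hard-coded expected values, conceptually:
#
#   output = block(**dummy_inputs)
#   output_slice = output[0, -1, -3:, -3:].flatten()
#   assert torch.allclose(output_slice, torch.tensor(expected_slice), atol=5e-3)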
| 695 | 0 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    # encode the input string to bytes, then Ascii85-encode those bytes
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # Ascii85-decode back to bytes, then decode to a UTF-8 string
    return base64.a85decode(a85encoded).decode("utf-8")
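# Round-trip sketch (illustrative): Ascii85 decoding inverts encoding, so
#
#   assert base85_decode(base85_encode("hello world")) == "hello world"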
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size=256,
        mask_feature_size=256,
        no_object_weight=0.1,
        use_auxiliary_loss=False,
        backbone_config=None,
        decoder_config=None,
        init_std=0.02,
        init_xavier_std=1.0,
        dice_weight=1.0,
        cross_entropy_weight=1.0,
        mask_weight=20.0,
        output_auxiliary_logits=None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
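# Minimal usage sketch (illustrative): build a config with the default Swin backbone and
# DETR decoder, then serialize the nested configs via the to_dict override above.
#
#   config = MaskFormerConfig()
#   as_dict = config.to_dict()
#   assert as_dict["backbone_config"]["model_type"] == "swin"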
| 695 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
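# Illustrative torch.hub usage of the entry points above (requires network access; the model
# id is a placeholder):
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")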
| 108 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
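# Illustrative (depends on Indeed's markup staying as parsed above): the generator is lazy,
# so callers can stop early without fetching more rows, e.g.
#
#   from itertools import islice
#   for title, company in islice(fetch_jobs("Delhi"), 3):
#       print(f"{title} at {company}")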
| 695 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text, n=100, character=" "):
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents):
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents, ctx_encoder, ctx_tokenizer):
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(rag_example_args, processing_args, index_hnsw_args):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
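# Illustrative follow-up (separate session; `passages_path` and `index_path` are the paths
# produced by main() above):
#
#   from datasets import load_from_disk
#   dataset = load_from_disk(passages_path)             # reload the passages
#   dataset.load_faiss_index("embeddings", index_path)  # reattach the HNSW index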
| 109 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
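# Minimal usage sketch (illustrative): the attribute_map above lets generic code read the
# canonical names while the stored fields keep their GPT-2-style spelling.
#
#   config = GPTBigCodeConfig(n_embd=2_048, n_layer=24)
#   assert config.hidden_size == 2_048 and config.num_hidden_layers == 24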
| 695 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
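# Illustrative usage sketch (assumes audio_filters.iir_filter.IIRFilter exposes a per-sample
# `process` method, as in the same project): design a 1 kHz low-pass biquad for 48 kHz audio
# and filter one sample.
#
#   lowpass = make_lowpass(1000, 48000)
#   filtered_sample = lowpass.process(0.5)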
| 334 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
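# Launch sketch (illustrative; the file name and flag values below are assumptions):
# with a DeepSpeed config already created via `accelerate config`, the script can be
# run as, e.g.
#
#     accelerate launch test_performance.py --model_name_or_path bert-base-cased \
#         --num_epochs 3 --performance_lower_bound 0.8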
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """
    Create all n-grams of length ``ngram_size`` from ``sentence``.

    >>> create_ngram("I am a sentence", 2)
    ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """
    Viterbi algorithm: computes the most likely sequence of hidden states
    that could have produced the given sequence of observations.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
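# Worked example (illustrative, not in the original file): the classic healthy/fever
# HMM from the Wikipedia article on the Viterbi algorithm.
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emit_p = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     # -> ['Healthy', 'Healthy', 'Fever']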
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def __UpperCAmelCase ( self : List[Any] ) -> str:
_A = '<s>'
_A = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ), _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ), _a )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
_A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<s>' )
self.assertEqual(vocab_keys[1], '<pad>' )
self.assertEqual(vocab_keys[-1], '<mask>' )
self.assertEqual(len(_a ), 10_54 )
def __UpperCAmelCase ( self : List[Any] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size, 10_54 )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
_A = MBartaaTokenizer(_a, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=_a )
_A = tokenizer.tokenize('This is a test' )
self.assertListEqual(_a, ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ), [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]], )
_A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_a, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'], )
_A = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
_A = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'], )
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
# fmt: off
_A = {'input_ids': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a, model_name='facebook/mbart-large-50', revision='d3913889c59cd5c9e456b269c376325eabad57e2', )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_A = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_A = self.rust_tokenizer_class.from_pretrained(_a, **_a )
_A = self.tokenizer_class.from_pretrained(_a, **_a )
_A = tempfile.mkdtemp()
_A = tokenizer_r.save_pretrained(_a )
_A = tokenizer_p.save_pretrained(_a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_A = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(_a, _a )
# Checks everything loads correctly in the same way
_A = tokenizer_r.from_pretrained(_a )
_A = tokenizer_p.from_pretrained(_a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_a, _a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_a )
# Save tokenizer rust, legacy_format=True
_A = tempfile.mkdtemp()
_A = tokenizer_r.save_pretrained(_a, legacy_format=_a )
_A = tokenizer_p.save_pretrained(_a )
# Checks it save with the same files
self.assertSequenceEqual(_a, _a )
# Checks everything loads correctly in the same way
_A = tokenizer_r.from_pretrained(_a )
_A = tokenizer_p.from_pretrained(_a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_a, _a ) )
shutil.rmtree(_a )
# Save tokenizer rust, legacy_format=False
_A = tempfile.mkdtemp()
_A = tokenizer_r.save_pretrained(_a, legacy_format=_a )
_A = tokenizer_p.save_pretrained(_a )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_A = tokenizer_r.from_pretrained(_a )
_A = tokenizer_p.from_pretrained(_a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_a, _a ) )
shutil.rmtree(_a )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'], 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'], 25_00_38 )
def __UpperCAmelCase ( self : Any ) -> Tuple:
_A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, _a )
def __UpperCAmelCase ( self : str ) -> Dict:
self.assertIn(_a, self.tokenizer.all_special_ids )
_A = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
_A = self.tokenizer.decode(_a, skip_special_tokens=_a )
_A = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=_a )
self.assertEqual(_a, _a )
self.assertNotIn(self.tokenizer.eos_token, _a )
def __UpperCAmelCase ( self : Any ) -> List[str]:
_A = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0], _a )
_A = 10
_A = self.tokenizer(_a, max_length=_a, truncation=_a ).input_ids[0]
self.assertEqual(ids[0], _a )
self.assertEqual(ids[-1], 2 )
self.assertEqual(len(_a ), _a )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ), [25_00_53, 25_00_01] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
_A = tempfile.mkdtemp()
_A = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_a )
_A = MBartaaTokenizer.from_pretrained(_a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, _a )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
_A = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=_a, return_tensors='pt' )
_A = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __UpperCAmelCase ( self : Tuple ) -> int:
_A = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=_a, truncation=_a, max_length=len(self.expected_src_tokens ), return_tensors='pt', )
_A = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id )
self.assertIsInstance(_a, _a )
self.assertEqual((2, 14), batch.input_ids.shape )
self.assertEqual((2, 14), batch.attention_mask.shape )
_A = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, _a )
self.assertEqual(2, batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
def __UpperCAmelCase ( self : Any ) -> Dict:
_A = self.tokenizer(self.src_text, padding=_a, truncation=_a, max_length=3, return_tensors='pt' )
_A = self.tokenizer(
text_target=self.tgt_text, padding=_a, truncation=_a, max_length=10, return_tensors='pt' )
_A = targets['input_ids']
_A = shift_tokens_right(_a, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
_A = self.tokenizer._build_translation_inputs(
'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(_a ), {
# en_XX, A, test, EOS
'input_ids': [[25_00_04, 62, 30_34, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_00_01,
}, )
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Checks whether ``number`` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the first square spiral for which the
    fraction of primes along both diagonals falls below ``ratio``
    (Project Euler problem 58).
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
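# Example (illustrative): print the side length for the default 10% prime ratio.
#     print(f"{solution() = }")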
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    """Word error rate metric, computed with jiwer."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
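# Usage sketch (mirrors the doctest in _KWARGS_DESCRIPTION above):
#     import datasets
#     wer = datasets.load_metric("wer")
#     print(wer.compute(predictions=["hello there"], references=["hello here"]))  # 0.5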
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
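# Note (illustrative assumption about usage): with _LazyModule installed in sys.modules,
# importing e.g. MCTCTModel from this package defers the heavy torch-backed import
# until the attribute is first accessed.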
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
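# Invocation sketch (the flags come from TensorFlowBenchmarkArguments; the exact
# values here are illustrative):
#     python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128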
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
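# Usage sketch (illustrative):
#     from transformers import DPRConfig, DPRContextEncoder
#     config = DPRConfig(projection_dim=128)
#     model = DPRContextEncoder(config)  # randomly initialised context encoder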
'''simple docstring'''
def __get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four small demo graphs, selected by ``index``."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the list of undirected bridges of ``graph``."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
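# Worked example (illustrative): in demo graph 0 the triangle 0-1-2 and the cycle
# 5-6-7-8 contain no bridges, leaving exactly the three "stalk" edges:
#     compute_bridges(__get_demo_graph(0))  # [(3, 4), (2, 3), (2, 5)] (order may vary)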
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write the updated schema through __dict__ on the copy
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
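# Usage sketch (illustrative): align the template with a dataset's features.
#     feats = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification().align_with_features(feats)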
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
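# Usage sketch (illustrative): each handler forwards its non-default fields to the
# matching torch object when the Accelerator is built, e.g.
#     Accelerator(kwargs_handlers=[DistributedDataParallelKwargs(find_unused_parameters=True)])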
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
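# Usage sketch (illustrative): a LLaMA config with linear RoPE scaling; the factor
# must be a float strictly greater than 1 to pass validation.
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})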
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class TPULauncherTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class snake_case__ ( unittest.TestCase ):
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase : Union[str, Any] = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase : List[str] = f"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
UpperCAmelCase : List[Any] = [sys.executable] + distributed_args
execute_subprocess_async(_a , env=os.environ.copy() )
| 595 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def __UpperCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = '''UNwant\u00E9d,running'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
__a = self.get_tokenizer(do_lower_case=_a )
__a = self.get_rust_tokenizer(do_lower_case=_a )
__a = '''UNwant\u00E9d,running'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
__a = '''a\n\'ll !!to?\'d of, can\'t.'''
__a = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__a = {}
for i, token in enumerate(_a ):
__a = i
__a = WordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __UpperCAmelCase ( self ):
tokenizer = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
sentence = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
tokens = tokenizer_r.encode_plus(
sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __UpperCAmelCase ( self ):
list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
kwargs['''tokenize_chinese_chars'''] = True
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
kwargs['''tokenize_chinese_chars'''] = False
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that only the first Chinese character is not preceded by "##".
expected_tokens = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
]
self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 695 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest ( BertTokenizationTest ):
tokenizer_class = DistilBertTokenizer
rust_tokenizer_class = DistilBertTokenizerFast
test_rust_tokenizer = True
@slow
def test_sequence_builders ( self ) -> List[str]:
tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 307 |
"""simple docstring"""
from __future__ import annotations
def simple_interest ( principal : float , daily_interest_rate : float , days_between_payments : float ) -> float:
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def compound_interest ( principal : float , nominal_annual_interest_rate_percentage : float , number_of_compounding_periods : float , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest ( principal : float , nominal_annual_percentage_rate : float , number_of_years : float , ) -> float:
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
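# Minimal usage sketch (illustrative, not part of the original module):
# simple_interest(500, 0.001, 30) accrues 500 * 0.001 * 30 = 15.0 units,
# while apr_interest(1000, 0.05, 1) compounds a 5% APR daily over 365 periods.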
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
"""simple docstring"""
def catalan_numbers ( upper_limit : int ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
catalan_list = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
catalan_list[0] = 1
if upper_limit > 0:
catalan_list[1] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
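# Worked example (sketch): catalan_numbers(5) == [1, 1, 2, 5, 14, 42]; e.g.
# C(3) = C0*C2 + C1*C1 + C2*C0 = 2 + 1 + 2 = 5 via the recurrence above.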
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F'The Catalan numbers from 0 through {N} are:')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 373 |
"""simple docstring"""
def jaccard_similarity ( set_a , set_b , alternative_union=False ) -> Any:
if isinstance(set_a , set ) and isinstance(set_b , set ):
intersection = len(set_a.intersection(set_b ) )
if alternative_union:
union = len(set_a ) + len(set_b )
else:
union = len(set_a.union(set_b ) )
return intersection / union
if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
intersection = [element for element in set_a if element in set_b]
if alternative_union:
union = len(set_a ) + len(set_b )
return len(intersection ) / union
else:
union = set_a + [element for element in set_b if element not in set_a]
return len(intersection ) / len(union )
return None
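# Worked example (sketch): for {"a", "b", "c", "d", "e"} and
# {"c", "d", "e", "f", "h", "i"} the intersection has 3 elements and the
# union has 8, so the similarity is 3 / 8 = 0.375; with alternative_union=True
# the denominator becomes 5 + 6 = 11 instead.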
if __name__ == "__main__":
lowercase_ = {"a", "b", "c", "d", "e"}
lowercase_ = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 695 | 0 |
'''simple docstring'''
def generate_large_matrix ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid ( grid :list[list[int]] ):
'''simple docstring'''
assert all(row == sorted(row , reverse=True ) for row in grid )
assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index ( array :list[int] ):
'''simple docstring'''
left = 0
right = len(array ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
mid = (left + right) // 2
num = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
left = mid + 1
else:
right = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(array )
def count_negatives_binary_search ( grid :list[list[int]] ):
'''simple docstring'''
total = 0
bound = len(grid[0] )
for i in range(len(grid ) ):
bound = find_negative_index(grid[i][:bound] )
total += bound
return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force ( grid :list[list[int]] ):
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break ( grid :list[list[int]] ):
'''simple docstring'''
total = 0
for row in grid:
for i, number in enumerate(row ):
if number < 0:
total += len(row ) - i
break
return total
def benchmark ( ):
'''simple docstring'''
from timeit import timeit
print("""Running benchmarks""" )
setup = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
time = timeit(f'''{func}(grid=grid)''' , setup=setup , number=500 )
print(f'''{func}() took {time:0.4f} seconds''' )
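# Worked example (sketch): in [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2],
# [-1, -1, -2, -3]] there are 8 negative values. Because rows and columns are
# sorted in decreasing order, count_negatives_binary_search only ever moves
# the search bound left while scanning down, giving O(rows + cols) overall.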
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 334 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story ( story_id : str ) -> dict:
url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(url ).json()
def hackernews_top_stories ( max_stories : int = 10 ) -> list[dict]:
url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
story_ids = requests.get(url ).json()[:max_stories]
return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown ( max_stories : int = 10 ) -> str:
stories = hackernews_top_stories(max_stories )
return "\n".join('''* [{title}]({url})'''.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 695 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer ( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs )-> List[Any]:
super().__init__(
unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
with open(vocab_file , encoding="utf-8" ) as vocab_handle:
self.vocab = json.load(vocab_handle )
self.decoder = {v: k for k, v in self.vocab.items()}
@property
def vocab_size ( self )-> Union[str, Any]:
return len(self.vocab )
def get_vocab ( self )-> Union[str, Any]:
return dict(self.vocab , **self.added_tokens_encoder )
def _tokenize ( self , text )-> Dict:
char_tokens = []
for s in text:
char_tokens.extend(s )
return char_tokens
def _convert_token_to_id ( self , token )-> Tuple:
return self.vocab.get(token , self.vocab.get(self.unk_token ) )
def _convert_id_to_token ( self , index )-> Any:
return self.decoder.get(index )
def save_vocabulary ( self , save_directory , filename_prefix = None )-> List[str]:
if not os.path.isdir(save_directory ):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
return
vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(vocab_file , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
return (vocab_file,)
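# Minimal usage sketch (assumes a local vocab.json mapping single characters
# to ids, e.g. {"[GO]": 0, "a": 1, "b": 2}):
# tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
# tokenizer._tokenize("ab")  # -> ["a", "b"]: tokenization is character-level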
| 440 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowercase_ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig ( PretrainedConfig ):
'''simple docstring'''
model_type = 'blip_2_vision_model'
def __init__( self , hidden_size=1_408 , intermediate_size=6_144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.00001 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.qkv_bias = qkv_bias
@classmethod
def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ):
cls._set_token_in_kwargs(kwargs )
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
config_dict = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(config_dict , **kwargs )
class BlipaQFormerConfig ( PretrainedConfig ):
'''simple docstring'''
model_type = 'blip_2_qformer'
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_408 , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.cross_attention_frequency = cross_attention_frequency
self.encoder_hidden_size = encoder_hidden_size
@classmethod
def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ):
cls._set_token_in_kwargs(kwargs )
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
config_dict = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(config_dict , **kwargs )
class BlipaConfig ( PretrainedConfig ):
'''simple docstring'''
model_type = 'blip-2'
is_composition = True
def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
super().__init__(**kwargs )
if vision_config is None:
vision_config = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
qformer_config = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
text_config = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
self.vision_config = BlipaVisionConfig(**vision_config )
self.qformer_config = BlipaQFormerConfig(**qformer_config )
text_model_type = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
self.tie_word_embeddings = self.text_config.tie_word_embeddings
self.is_encoder_decoder = self.text_config.is_encoder_decoder
self.num_query_tokens = num_query_tokens
self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
self.initializer_factor = 1.0
self.initializer_range = 0.02
@classmethod
def from_vision_qformer_text_configs ( cls , vision_config , qformer_config , text_config , **kwargs , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
def to_dict ( self ):
output = copy.deepcopy(self.__dict__ )
output['''vision_config'''] = self.vision_config.to_dict()
output['''qformer_config'''] = self.qformer_config.to_dict()
output['''text_config'''] = self.text_config.to_dict()
output['''model_type'''] = self.__class__.model_type
return output
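# Minimal usage sketch: sub-configs may be given as plain dicts and default to
# empty ones, so BlipaConfig() alone builds a full BLIP-2 config whose
# qformer_config.encoder_hidden_size is tied to vision_config.hidden_size.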
| 695 | 0 |
'''simple docstring'''
def sum_digits ( num : int ):
digit_sum = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def solution ( max_n : int = 100 ):
pre_numerator = 1
cur_numerator = 2
for i in range(2 , max_n + 1 ):
temp = pre_numerator
e_cont = 2 * i // 3 if i % 3 == 0 else 1
pre_numerator = cur_numerator
cur_numerator = e_cont * pre_numerator + temp
return sum_digits(cur_numerator )
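# Worked example (sketch): the continued fraction of e is
# [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], and the 10th convergent is 1457/536, so
# solution(10) == sum_digits(1457) == 17.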
if __name__ == "__main__":
print(F'''{solution() = }''')
| 107 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaVaConfig ( PretrainedConfig ):
'''simple docstring'''
model_type = 'deberta-v2'
def __init__( self , vocab_size=128_100 , hidden_size=1_536 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=6_144 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1E-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.relative_attention = relative_attention
self.max_relative_positions = max_relative_positions
self.pad_token_id = pad_token_id
self.position_biased_input = position_biased_input
# Backwards compatibility
if type(pos_att_type ) == str:
pos_att_type = [x.strip() for x in pos_att_type.lower().split('''|''' )]
self.pos_att_type = pos_att_type
self.vocab_size = vocab_size
self.layer_norm_eps = layer_norm_eps
self.pooler_hidden_size = kwargs.get('''pooler_hidden_size''' , hidden_size )
self.pooler_dropout = pooler_dropout
self.pooler_hidden_act = pooler_hidden_act
class DebertaVaOnnxConfig ( OnnxConfig ):
'''simple docstring'''
@property
def inputs ( self ):
if self.task == "multiple-choice":
dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
dynamic_axis = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def default_onnx_opset ( self ):
return 12
def generate_dummy_inputs ( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , tokenizer = None , ):
dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
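# Minimal usage sketch: with the default type_vocab_size=0 the model has no
# segment embeddings, so generate_dummy_inputs drops token_type_ids and the
# exported ONNX graph only consumes input_ids and attention_mask.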
| 695 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNet2DConditionModel,
UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
pipeline_class = UnCLIPImageVariationPipeline
params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
batch_params = IMAGE_VARIATION_BATCH_PARAMS
required_optional_params = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
test_xformers_attention = False
@property
def text_embedder_hidden_size ( self ) -> int:
return 32
@property
def time_input_dim ( self ) -> List[str]:
return 32
@property
def block_out_channels_a ( self ) -> Tuple:
return self.time_input_dim
@property
def time_embed_dim ( self ) -> Dict:
return self.time_input_dim * 4
@property
def cross_attention_dim ( self ) -> List[Any]:
return 100
@property
def dummy_tokenizer ( self ) -> Union[str, Any]:
tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def dummy_text_encoder ( self ) -> Any:
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(config )
@property
def dummy_image_encoder ( self ) -> List[str]:
torch.manual_seed(0 )
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(config )
@property
def dummy_text_proj ( self ) -> Any:
torch.manual_seed(0 )
model_kwargs = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
model = UnCLIPTextProjModel(**model_kwargs )
return model
@property
def dummy_decoder ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
model_kwargs = {
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
model = UNet2DConditionModel(**model_kwargs )
return model
@property
def dummy_super_res_kwargs ( self ) -> Optional[int]:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def dummy_super_res_first ( self ) -> int:
torch.manual_seed(0 )
model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
@property
def dummy_super_res_last ( self ) -> Tuple:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
def get_dummy_components ( self ) -> Tuple:
decoder = self.dummy_decoder
text_proj = self.dummy_text_proj
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
super_res_first = self.dummy_super_res_first
super_res_last = self.dummy_super_res_last
decoder_scheduler = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
super_res_scheduler = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def get_dummy_inputs ( self , device , seed=0 , pil_image=True ) -> Optional[int]:
input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
if str(device ).startswith('''mps''' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
if pil_image:
input_image = input_image * 0.5 + 0.5
input_image = input_image.clamp(0 , 1 )
input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def test_unclip_image_variation_input_tensor ( self ) -> Tuple:
a__ = '''cpu'''
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**_a )
a__ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
a__ = self.get_dummy_inputs(_a , pil_image=_a )
a__ = pipe(**_a )
a__ = output.images
a__ = self.get_dummy_inputs(_a , pil_image=_a )
a__ = pipe(
**_a , return_dict=_a , )[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a__ = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def test_unclip_image_variation_input_image ( self ) -> str:
a__ = '''cpu'''
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**_a )
a__ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
a__ = self.get_dummy_inputs(_a , pil_image=_a )
a__ = pipe(**_a )
a__ = output.images
a__ = self.get_dummy_inputs(_a , pil_image=_a )
a__ = pipe(
**_a , return_dict=_a , )[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a__ = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def test_unclip_image_variation_input_list_images ( self ) -> Dict:
a__ = '''cpu'''
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**_a )
a__ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
a__ = self.get_dummy_inputs(_a , pil_image=_a )
a__ = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
a__ = pipe(**_a )
a__ = output.images
a__ = self.get_dummy_inputs(_a , pil_image=_a )
a__ = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
a__ = pipe(
**_a , return_dict=_a , )[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
a__ = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def test_unclip_passed_image_embed ( self ) -> Dict:
a__ = torch.device('''cpu''' )
class DummyScheduler:
"""simple docstring"""
init_noise_sigma = 1
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**_a )
a__ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
a__ = torch.Generator(device=_a ).manual_seed(0 )
a__ = pipe.decoder.dtype
a__ = 1
a__ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
a__ = pipe.prepare_latents(
_a , dtype=_a , device=_a , generator=_a , latents=_a , scheduler=DummyScheduler() )
a__ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
a__ = pipe.prepare_latents(
_a , dtype=_a , device=_a , generator=_a , latents=_a , scheduler=DummyScheduler() )
a__ = self.get_dummy_inputs(_a , pil_image=_a )
a__ = pipe(
**_a , decoder_latents=_a , super_res_latents=_a ).images
a__ = self.get_dummy_inputs(_a , pil_image=_a )
# Don't pass image, instead pass embedding
a__ = pipeline_inputs.pop('''image''' )
a__ = pipe.image_encoder(_a ).image_embeds
a__ = pipe(
**_a , decoder_latents=_a , super_res_latents=_a , image_embeddings=_a , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def test_attention_slicing_forward_pass ( self ) -> List[Any]:
a__ = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
a__ = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=_a , expected_max_diff=_a )
@skip_mps
def test_inference_batch_single_identical ( self ) -> int:
a__ = torch_device == '''cpu'''
a__ = True
a__ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=_a , relax_max_difference=_a , additional_params_copy_to_batched_inputs=_a , )
def test_inference_batch_consistent ( self ) -> Any:
a__ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
a__ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=_a , additional_params_copy_to_batched_inputs=_a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=_a )
@skip_mps
def test_dict_tuple_outputs_equivalent ( self ) -> int:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def test_save_load_local ( self ) -> Tuple:
return super().test_save_load_local()
@skip_mps
def test_save_load_optional_components ( self ) -> Optional[Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
def tearDown ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_unclip_image_variation_karlo ( self ) -> int:
a__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
a__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
a__ = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
a__ = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
a__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
a__ = pipeline(
_a , generator=_a , output_type='''np''' , )
a__ = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(_a , _a , 1_5 )
| 194 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowerCAmelCase__ ) , version.parse(lowerCAmelCase__ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> None:
__a = f'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' , lowerCAmelCase__ ):
__a , __a , __a = requirement, None, None
else:
__a = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
f''' got {requirement}''' )
__a , __a = match[0]
__a = want_full.split(''',''' ) # there could be multiple requirements
__a = {}
for w in want_range:
__a = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
f''' but got {requirement}''' )
__a , __a = match[0]
__a = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__a = '''.'''.join([str(lowerCAmelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return
# check if any version is installed
try:
__a = importlib.metadata.version(lowerCAmelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Optional[Any]:
__a = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowerCAmelCase__ , lowerCAmelCase__ )
| 695 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase_ : Union[str, Any] = pytest.mark.integration
@require_faiss
class IndexableDatasetTest ( TestCase ):
'''simple docstring'''
def snake_case__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def snake_case__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
import faiss
dset = self._create_dummy_dataset()
dset = dset.map(
lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
dset = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
scores, examples = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def snake_case__ ( self : int ) ->Dict:
'''simple docstring'''
import faiss
dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
scores, examples = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def snake_case__ ( self : List[Any] ) ->Any:
'''simple docstring'''
import faiss
dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
scores, examples = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def snake_case__ ( self : int ) ->Union[str, Any]:
'''simple docstring'''
dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.float32 ) ) )
def snake_case__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
from elasticsearch import Elasticsearch
dset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
mocked_index_create.return_value = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
es_client = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=es_client )
scores, examples = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class FaissIndexTest ( TestCase ):
'''simple docstring'''
def snake_case__ ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
query = np.zeros(5 , dtype=np.float32 )
query[1] = 1
scores, indices = index.search(query )
self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
queries = np.eye(5 , dtype=np.float32 )[::-1]
total_scores, total_indices = index.search_batch(queries )
self.assertRaises(ValueError , index.search_batch , queries[0] )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def snake_case__ ( self : str ) ->str:
'''simple docstring'''
import faiss
_UpperCamelCase : Tuple = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_UpperCamelCase : Union[str, Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_a ):
_UpperCamelCase : int = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def snake_case__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
import faiss
custom_index = faiss.IndexFlat(5 )
index = FaissIndex(custom_index=custom_index )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def snake_case__ ( self : List[Any] ) ->str:
'''simple docstring'''
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
index.save(tmp_file.name )
index = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
query = np.zeros(5 , dtype=np.float32 )
query[1] = 1
scores, indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs ( mockfs ) -> None:
'''simple docstring'''
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.float32 ) )
index_name = "index.faiss"
path = f'''mock://{index_name}'''
index.save(path ,storage_options=mockfs.storage_options )
index = FaissIndex.load(path ,storage_options=mockfs.storage_options )
query = np.zeros(5 ,dtype=np.float32 )
query[1] = 1
scores, indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def snake_case__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
es_client = Elasticsearch()
mocked_index_create.return_value = {"acknowledged": True}
index = ElasticSearchIndex(es_client=es_client )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
query = "foo"
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
scores, indices = index.search(query )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
query = "foo"
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
scores, indices = index.search(query , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
queries = ["foo", "bar", "foobar"]
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
total_scores, total_indices = index.search_batch(queries )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([1, 1, 1] , best_indices )
# batched queries with timeout
queries = ["foo", "bar", "foobar"]
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
total_scores, total_indices = index.search_batch(queries , request_timeout=30 )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([1, 1, 1] , best_indices )
| 435 |
"""simple docstring"""
from __future__ import annotations
lowercase_ = list[tuple[int, int]]
lowercase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class Node :
'''simple docstring'''
def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
self.pos_x = pos_x
self.pos_y = pos_y
self.pos = (pos_y, pos_x)
self.goal_x = goal_x
self.goal_y = goal_y
self.g_cost = g_cost
self.parent = parent
self.f_cost = self.calculate_heuristic()
def calculate_heuristic ( self ):
dx = abs(self.pos_x - self.goal_x )
dy = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , other ):
return self.f_cost < other.f_cost
class GreedyBestFirst :
'''simple docstring'''
def __init__( self , start , goal ):
self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , None )
self.open_nodes = [self.start]
self.closed_nodes = []
self.reached = False
def search ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
current_node = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
self.reached = True
return self.retrace_path(current_node )
self.closed_nodes.append(current_node )
successors = self.get_successors(current_node )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(child_node )
else:
# retrieve the best current path
better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(child_node )
else:
self.open_nodes.append(better_node )
if not self.reached:
return [self.start.pos]
return None
def get_successors ( self , parent ):
successors = []
for action in delta:
pos_x = parent.pos_x + action[1]
pos_y = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
return successors
def retrace_path ( self , node ):
current_node = node
path = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
current_node = current_node.parent
path.reverse()
return path
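# Design note (sketch): f_cost here is the Manhattan-distance heuristic alone
# (g_cost is tracked but never added in), so sorting open_nodes by f_cost makes
# this greedy best-first rather than A*; the returned path need not be optimal.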
if __name__ == "__main__":
lowercase_ = (0, 0)
lowercase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
lowercase_ = GreedyBestFirst(init, goal)
lowercase_ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase_ = 2
for elem in grid:
print(elem)
| 695 | 0 |
'''simple docstring'''
class Graph : # Public class to implement a graph
'''simple docstring'''
def __init__( self , row , col , graph ):
self.ROW = row
self.COL = col
self.graph = graph
def is_safe ( self , i , j , visited ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def diffs ( self , i , j , visited ):
# Checking all 8 elements surrounding nth element
row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
visited[i][j] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
def count_islands ( self ): # And finally, count all islands.
visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
count = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(i , j , visited )
count += 1
return count
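# Minimal usage sketch (illustrative):
# g = Graph(3, 3, [[1, 1, 0], [0, 0, 0], [0, 0, 1]])
# g.count_islands()  # -> 2: diffs() flood-fills with 8-directional moves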
| 138 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ) -> List[Any]:
# Initialise PyTorch model
config = RemBertConfig.from_json_file(rembert_config_file )
print('''Building PyTorch model from configuration: {}'''.format(str(config ) ) )
model = RemBertModel(config )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(pytorch_dump_path ) )
torch.save(model.state_dict() , pytorch_dump_path )
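# Example invocation (sketch; all paths below are placeholders):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./rembert.ckpt \
#     --rembert_config_file ./config.json \
#     --pytorch_dump_path ./pytorch_model.bin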
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 695 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def fetch_jobs(location : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
_SCREAMING_SNAKE_CASE =BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
_SCREAMING_SNAKE_CASE =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
_SCREAMING_SNAKE_CASE =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
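# Usage sketch: fetch_jobs is a generator, so results stream lazily, e.g.
# next(fetch_jobs("mumbai")) yields the first (job_title, company_name) pair
# (subject to network access and Indeed's current page layout).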
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 255 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase_ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def setUpClass ( cls ):
cls._token = TOKEN
HfFolder.save_token(TOKEN )
@classmethod
def tearDownClass ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def test_push_to_hub ( self ):
config = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
model = FlaxBertModel(config )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
new_model = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
base_params = flatten_dict(unfreeze(model.params ) )
new_params = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir , repo_id='''test-model-flax''' , push_to_hub=True , use_auth_token=self._token )
new_model = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
base_params = flatten_dict(unfreeze(model.params ) )
new_params = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff , 1E-3 , msg=f'''{key} not identical''' )
def test_push_to_hub_in_organization ( self ):
config = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
model = FlaxBertModel(config )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
new_model = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
base_params = flatten_dict(unfreeze(model.params ) )
new_params = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
tmp_dir , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=True , use_auth_token=self._token )
new_model = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
base_params = flatten_dict(unfreeze(model.params ) )
new_params = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff , 1E-3 , msg=f'''{key} not identical''' )
def check_models_equal ( model_a , model_b ) -> Optional[int]:
models_are_equal = True
flat_params_a = flatten_dict(model_a.params )
flat_params_b = flatten_dict(model_b.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
models_are_equal = False
return models_are_equal
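# Sketch of the comparison: both parameter trees are flattened to
# {path: array} dicts, and the models count as equal only if every
# corresponding leaf differs by at most 1e-4 in summed absolute value.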
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def test_model_from_pretrained_subfolder ( self ):
config = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
model = FlaxBertModel(config )
subfolder = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(tmp_dir , subfolder ) )
with self.assertRaises(OSError ):
_ = FlaxBertModel.from_pretrained(tmp_dir )
model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
self.assertTrue(check_models_equal(model , model_loaded ) )
def test_model_from_pretrained_subfolder_sharded ( self ):
config = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
model = FlaxBertModel(config )
subfolder = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(tmp_dir , subfolder ) , max_shard_size='''10KB''' )
with self.assertRaises(OSError ):
_ = FlaxBertModel.from_pretrained(tmp_dir )
model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
self.assertTrue(check_models_equal(model , model_loaded ) )
def test_model_from_pretrained_hub_subfolder ( self ):
subfolder = '''bert'''
model_id = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(OSError ):
_ = FlaxBertModel.from_pretrained(model_id )
model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
self.assertIsNotNone(model )
def test_model_from_pretrained_hub_subfolder_sharded ( self ):
subfolder = '''bert'''
model_id = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(OSError ):
_ = FlaxBertModel.from_pretrained(model_id )
model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
self.assertIsNotNone(model )
| 695 | 0 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests (FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input ( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key ,((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
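# Hedged sketch of how a model-tester mixin typically consumes the two helpers
# above (the exact mixin internals are an assumption; names are illustrative):
#
#   init_dict, inputs_dict = tester.prepare_init_args_and_inputs_for_common()
#   model = FlaxAutoencoderKL(**init_dict)
#   sample = model(**inputs_dict).sample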
| 116 |
"""simple docstring"""
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = DownBlock2D  # noqa F405
    block_type = 'down'
    def test_output ( self ):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice )
class ResnetDownsampleBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = 'down'
    def test_output ( self ):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice )
class AttnDownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnDownBlock2D  # noqa F405
    block_type = 'down'
    def test_output ( self ):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice )
class CrossAttnDownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = 'down'
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict['''cross_attention_dim'''] = 32
        return init_dict, inputs_dict
    def test_output ( self ):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice )
class SimpleCrossAttnDownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_encoder_hidden_states=True )
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict['''cross_attention_dim'''] = 32
        return init_dict, inputs_dict
    @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
    def test_output ( self ):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice )
class SkipDownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = SkipDownBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_skip_sample=True )
    def test_output ( self ):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice )
class AttnSkipDownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_skip_sample=True )
    def test_output ( self ):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice )
class DownEncoderBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_temb=False )
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict = {
            '''in_channels''': 32,
            '''out_channels''': 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output ( self ):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice )
class AttnDownEncoderBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_temb=False )
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict = {
            '''in_channels''': 32,
            '''out_channels''': 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output ( self ):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice )
class UNetMidBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = UNetMidBlock2D  # noqa F405
    block_type = 'mid'
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict = {
            '''in_channels''': 32,
            '''temb_channels''': 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output ( self ):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice )
class UNetMidBlock2DCrossAttnTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = 'mid'
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict['''cross_attention_dim'''] = 32
        return init_dict, inputs_dict
    def test_output ( self ):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice )
class UNetMidBlock2DSimpleCrossAttnTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = 'mid'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_encoder_hidden_states=True )
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict['''cross_attention_dim'''] = 32
        return init_dict, inputs_dict
    def test_output ( self ):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice )
class UpBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = UpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    def test_output ( self ):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice )
class ResnetUpsampleBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    def test_output ( self ):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice )
class CrossAttnUpBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict['''cross_attention_dim'''] = 32
        return init_dict, inputs_dict
    def test_output ( self ):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice )
class SimpleCrossAttnUpBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True , include_encoder_hidden_states=True )
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict['''cross_attention_dim'''] = 32
        return init_dict, inputs_dict
    def test_output ( self ):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice )
class AttnUpBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
    def test_output ( self ):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice )
class SkipUpBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = SkipUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    def test_output ( self ):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice )
class AttnSkipUpBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_res_hidden_states_tuple=True )
    def test_output ( self ):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice )
class UpDecoderBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_temb=False )
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict = {'''in_channels''': 32, '''out_channels''': 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output ( self ):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice )
class AttnUpDecoderBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input ( self ):
        return super().get_dummy_input(include_temb=False )
    def prepare_init_args_and_inputs_for_common ( self ):
        init_dict = {'''in_channels''': 32, '''out_channels''': 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output ( self ):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice )
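# Hedged sketch of driving one of these blocks directly, outside the test
# harness (the constructor arguments are an assumption based on the init
# dicts used above):
#
#   import torch
#   from diffusers.models.unet_2d_blocks import DownBlock2D
#   block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
#   hidden_states, output_states = block(torch.randn(4, 32, 32, 32), torch.randn(4, 128))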
| 695 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Any = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class CTRLConfig ( PretrainedConfig ):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , vocab_size=24_65_34 , n_positions=2_56 , n_embd=12_80 , dff=81_92 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.0_2 , use_cache=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
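# Minimal runnable check (hedged: assumes a working transformers install) of
# the attribute map above: reads of `hidden_size` are redirected to `n_embd`
# by `PretrainedConfig`.
if __name__ == "__main__":
    _cfg = CTRLConfig(n_embd=512 , n_layer=4 )
    assert _cfg.hidden_size == 512
    assert _cfg.num_hidden_layers == 4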
| 595 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase_ = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowercase_ = logging.get_logger(__name__)
class MaskFormerConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'maskformer'
    attribute_map = {'hidden_size': 'mask_feature_size'}
    backbones_supported = ['resnet', 'swin']
    decoders_supported = ['detr']
    def __init__( self , fpn_feature_size = 256 , mask_feature_size = 256 , no_object_weight = 0.1 , use_auxiliary_loss = False , backbone_config = None , decoder_config = None , init_std = 0.02 , init_xavier_std = 1.0 , dice_weight = 1.0 , cross_entropy_weight = 1.0 , mask_weight = 20.0 , output_auxiliary_logits = None , **kwargs , ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
                f'''Supported model types: {','.join(self.backbones_supported )}''' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('''model_type''' ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'''Transformer Decoder {decoder_type} not supported, please use one of'''
                    f''' {','.join(self.decoders_supported )}''' )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_and_decoder_configs ( cls , backbone_config , decoder_config , **kwargs ):
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict ( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''decoder_config'''] = self.decoder_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
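# Hedged demo: with no arguments the config falls back to a Swin backbone and
# a DETR decoder, as the `__init__` above documents (assumes a transformers
# install with both architectures available).
if __name__ == "__main__":
    _cfg = MaskFormerConfig()
    print(_cfg.backbone_config.model_type )  # "swin"
    print(_cfg.decoder_config.model_type )  # "detr"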
| 695 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components ( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs ( self , device , seed=0 ):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": init_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_inpaint ( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_single_identical ( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline ( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench.npy""" )
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16 ( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading ( self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type="""np""" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
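# Hedged end-to-end sketch mirroring the slow tests above (downloads the
# published checkpoint; `init_image` and `mask_image` are PIL images you
# supply):
#
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-inpainting", safety_checker=None
#   )
#   result = pipe(prompt="a yellow cat", image=init_image,
#                 mask_image=mask_image, output_type="np").images[0]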
| 307 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
lowercase_ = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def lowercase ( lowerCAmelCase__ : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
__a = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__a = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__a = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
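# Hedged variation: cap the number of scraped postings without exhausting the
# generator (network access and Indeed's current markup are assumed):
#
#   from itertools import islice
#   for title, company in islice(fetch_jobs("Delhi"), 5):
#       print(f"{title} @ {company}")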
| 695 | 0 |
"""simple docstring"""
from typing import Any
def viterbi (
    observations_space : list ,
    states_space : list ,
    initial_probabilities : dict ,
    transition_probabilities : dict ,
    emission_probabilities : dict ,
) -> list:
    """simple docstring"""
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation (
    observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty (
    observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )
def _validate_lists ( observations_space , states_space ) -> None:
    """simple docstring"""
    _validate_list(observations_space , 'observations_space' )
    _validate_list(states_space , 'states_space' )
def _validate_list ( _object , var_name : str ) -> None:
    """simple docstring"""
    if not isinstance(_object , list ):
        msg = F"{var_name} must be a list"
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = F"{var_name} must be a list of strings"
                raise ValueError(msg )
def _validate_dicts ( initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    _validate_dict(initial_probabilities , 'initial_probabilities' , float )
    _validate_nested_dict(transition_probabilities , 'transition_probabilities' )
    _validate_nested_dict(emission_probabilities , 'emission_probabilities' )
def _validate_nested_dict ( _object , var_name : str ) -> None:
    """simple docstring"""
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict ( _object , var_name : str , value_type : type , nested : bool = False ) -> None:
    """simple docstring"""
    if not isinstance(_object , dict ):
        msg = F"{var_name} must be a dict"
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = F"{var_name} all keys must be strings"
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        msg = F"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
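if __name__ == "__main__":
    # Worked example (hedged): the classic healthy/fever HMM. With these
    # tables the most likely hidden sequence for ["normal", "cold", "dizzy"]
    # is ["Healthy", "Healthy", "Fever"].
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations , states , start_p , trans_p , emit_p ))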
| 373 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , vocab_size=50_257 , n_positions=1_024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
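# Minimal runnable check (hedged: assumes a working transformers install) of
# the attribute map above, plus the multi-query default that distinguishes
# GPTBigCode from vanilla GPT-2.
if __name__ == "__main__":
    _cfg = GPTBigCodeConfig(n_embd=2048 )
    assert _cfg.hidden_size == 2048
    assert _cfg.multi_query is True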
| 695 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__A : int = logging.get_logger('transformers.models.encodec')
__A : Optional[int] = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
__A : Tuple = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
__A : Optional[int] = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
__A : List[Any] = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
__A : Optional[Any] = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
__A : str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__A : Dict = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def should_ignore ( name , ignore_keys ):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
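# Quick illustration (hedged) of the wildcard rules implemented above:
#
#   should_ignore("decoder.model.0.conv", ["encoder.*"])   # False: prefix test fails
#   should_ignore("encoder.model.0.conv", ["encoder.*"])   # True: "key.*" prefix match
#   should_ignore("quantizer.vq.layers.0._codebook.embed",
#                 ["quantizer.vq.*._codebook.embed"])      # True: ".*." needs both halves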
def recursively_load_weights ( orig_dict , hf_model , model_name ):
    '''simple docstring'''
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f'''{name} was ignored''' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split(""".*.""" )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split(""".""" )[-2]
                    mapped_key = mapped_key.replace("""*""" , layer_index )
                if "weight_g" in name:
                    weight_type = """weight_g"""
                elif "weight_v" in name:
                    weight_type = """weight_v"""
                elif "weight_ih_l0" in name:
                    weight_type = """weight_ih_l0"""
                elif "weight_hh_l0" in name:
                    weight_type = """weight_hh_l0"""
                elif "bias_ih_l0" in name:
                    weight_type = """bias_ih_l0"""
                elif "bias_hh_l0" in name:
                    weight_type = """bias_hh_l0"""
                elif "weight_ih_l1" in name:
                    weight_type = """weight_ih_l1"""
                elif "weight_hh_l1" in name:
                    weight_type = """weight_hh_l1"""
                elif "bias_ih_l1" in name:
                    weight_type = """bias_ih_l1"""
                elif "bias_hh_l1" in name:
                    weight_type = """bias_hh_l1"""
                elif "bias" in name:
                    weight_type = """bias"""
                elif "weight" in name:
                    weight_type = """weight"""
                elif "running_mean" in name:
                    weight_type = """running_mean"""
                elif "running_var" in name:
                    weight_type = """running_var"""
                elif "num_batches_tracked" in name:
                    weight_type = """num_batches_tracked"""
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 3_20_00
        config.codebook_size = 20_48
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 4_80_00
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = """time_group_norm"""
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'''Unknown model name: {model_name}''' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["""best_state"""]
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__A : Optional[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
    )
| 334 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase_ = 1_6
lowercase_ = 3_2
def get_dataloaders ( accelerator : Accelerator , batch_size : int = 16 , model_name : str = "bert-base-cased" ):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function ( config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('''glue''' , '''mrpc''' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions , references = accelerator.gather(
                (predictions, batch['''labels''']) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
        performance_metric[f'''epoch-{epoch}'''] = eval_metric['''accuracy''']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['''accuracy''']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
            json.dump(performance_metric , f )
def main ():
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--performance_lower_bound''' , type=float , default=None , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=3 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
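# Hedged example launch (the script filename is a placeholder; the DeepSpeed
# config is supplied through `accelerate config` or a --config_file):
#
#   accelerate launch test_performance.py \
#       --model_name_or_path bert-base-cased \
#       --num_epochs 3 \
#       --performance_lower_bound 0.8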
| 695 | 0 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize ( example ):
    output = {}
    output["input_ids"] = tokenizer(example["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
__lowerCAmelCase : Any =HfArgumentParser(PretokenizationArguments)
__lowerCAmelCase : Optional[int] =parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase : int =multiprocessing.cpu_count()
__lowerCAmelCase : Tuple =AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowerCAmelCase : List[Any] =time.time()
__lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train")
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")
__lowerCAmelCase : Any =time.time()
__lowerCAmelCase : List[Any] =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")
__lowerCAmelCase : Optional[int] =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 440 |
"""simple docstring"""
from typing import Any
def viterbi (
    observations_space : list ,
    states_space : list ,
    initial_probabilities : dict ,
    transition_probabilities : dict ,
    emission_probabilities : dict ,
) -> list:
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''''''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''''''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation (
    observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty (
    observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def _validate_lists ( observations_space , states_space ) -> None:
    _validate_list(observations_space , '''observations_space''' )
    _validate_list(states_space , '''states_space''' )
def _validate_list ( _object , var_name : str ) -> None:
    if not isinstance(_object , list ):
        msg = f'''{var_name} must be a list'''
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = f'''{var_name} must be a list of strings'''
                raise ValueError(msg )
def _validate_dicts ( initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    _validate_dict(initial_probabilities , '''initial_probabilities''' , float )
    _validate_nested_dict(transition_probabilities , '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities , '''emission_probabilities''' )
def _validate_nested_dict ( _object , var_name : str ) -> None:
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict ( _object , var_name : str , value_type : type , nested : bool = False ) -> None:
    if not isinstance(_object , dict ):
        msg = f'''{var_name} must be a dict'''
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = f'''{var_name} all keys must be strings'''
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = '''nested dictionary ''' if nested else ''''''
        msg = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file: str, class_name: str, test_name: str, correct_line: str, done_test: dict):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct: str, fail: str = None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
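# --- Added note (not in the original script) ---
# Input format, inferred from the `line.split(";")` parsing above; the
# concrete paths/names below are illustrative assumptions. Each line of
# --correct_filename carries four `;`-separated fields:
#   <file>;<class name>;<test name>;<replacement line>
# e.g.
#   tests/models/foo/test_modeling_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([0.1, 0.2])
# while --fail_filename lists pytest node ids, one per line:
#   tests/models/foo/test_modeling_foo.py::FooModelTest::test_inference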
| 107 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        # The three non-square corner values of the next spiral ring (side j + 2)
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
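# --- Added note (not in the original file) ---
# This is Project Euler problem 58: walk the number spiral ring by ring,
# counting primes on the diagonals, and stop once the prime ratio drops
# below `ratio`. After processing side length j the diagonals hold
# 2*j - 1 values; with the default ratio of 0.1 the commonly reported
# answer is a side length of 26241.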
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    # Swap the two entries if they are out of order for `direction`
    # (1 = ascending, 0 = descending).
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
a_ : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
a_ : List[Any] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 194 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
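# --- Added note (not in the original file) ---
# How this lazy-import pattern behaves: under TYPE_CHECKING the real classes
# are imported directly; at runtime the module object is swapped for a
# _LazyModule, so the torch-backed MCTCT classes are only imported on first
# attribute access. This keeps a bare `import transformers` cheap and lets
# the OptionalDependencyNotAvailable path degrade gracefully without torch.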
| 695 | 0 |
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = """__dummy_dataset1__"""
DATASET_LOADING_SCRIPT_CODE = """\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"""
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
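# --- Added consumer sketch (an assumption, not in the original conftest) ---
# A test would typically request the directory fixture and load the dummy
# builder from it, e.g.:
#
#   def test_dummy_dataset(dataset_loading_script_dir):
#       import datasets
#       builder = datasets.load_dataset_builder(dataset_loading_script_dir)
#       assert builder.name == DATASET_LOADING_SCRIPT_NAME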
| 435 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig):
    """Configuration class for DPR encoders (BERT-style backbone)."""

    model_type = "dpr"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
                 position_embedding_type="absolute", projection_dim: int = 0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
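# --- Added usage sketch (not in the original file) ---
# config = DPRConfig(hidden_size=128, num_hidden_layers=2, projection_dim=64)
# assert DPRConfig.from_dict(config.to_dict()).projection_dim == 64
# `projection_dim=0` keeps the encoder output at `hidden_size`; a nonzero
# value adds a final projection layer of that width.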
| 695 | 0 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Return the leading whitespace of `line` (empty string if none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code: str, indent_level: str = "", start_prompt: str = None, end_prompt: str = None):
    """Split `code` into blocks at the given indent level, optionally bounded by start/end prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap `key` so that comparisons ignore case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of object names: constants first, then classes, then functions."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Sort the names inside one `_import_structure` entry."""

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_keys = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_keys]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
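# --- Added example (not in the original script; the names are illustrative) ---
# Before/after behaviour of sort_objects_in_import on a one-line entry:
#   '"tokenization_foo": ["FooTokenizer", "FOO_CONSTANT", "convert_foo"]'
# becomes
#   '"tokenization_foo": ["FOO_CONSTANT", "FooTokenizer", "convert_foo"]'
# i.e. constants first, then classes, then functions, each alphabetically.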
def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` blocks of one init file (optionally in place)."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block_sorted = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block_sorted)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only: bool = True):
    """Run `sort_imports` on every `__init__.py` under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 138 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np"
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np"
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
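# --- Added note (not in the original tests) ---
# The offloading test above trades speed for memory:
# `enable_attention_slicing(1)` computes attention one slice at a time and
# `enable_sequential_cpu_offload()` keeps each submodule on CPU until it is
# needed, which is why peak VRAM stays under ~2.65 GB even at fp16.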
| 695 | 0 |
"""simple docstring"""
import operator as op
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

# The names of the two groups below are best-effort reconstructions.
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
TORCH_DISTRIBUTED_OPERATION_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
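# --- Added usage note (not in the original module) ---
# STR_OPERATION_TO_FUNC maps comparison strings to operator functions, e.g.
#   STR_OPERATION_TO_FUNC[">="](2, 1)  -> True
# which is how string requirements like 'torch >= 1.10.2' get dispatched.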
| 255 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Pass a customized GradScaler config through `GradScalerKwargs`.
        scaler_handler = GradScalerKwargs(init_scale=1_024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , _a )
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 695 | 0 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
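# --- Added consumer sketch (an assumption, not in the original tests) ---
# These criteria are normally passed to generation, e.g.:
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=32)])
#   model.generate(input_ids, stopping_criteria=criteria)
# Each criterion is called with (input_ids, scores) after every step, and
# generation stops as soon as one of them returns True.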
| 116 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class AccelerateTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 695 | 0 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
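# --- Added usage sketch (not in the original file) ---
# config = MaskFormerConfig()  # Swin backbone + DETR decoder defaults
# custom = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())
# round_trip = MaskFormerConfig.from_dict(custom.to_dict())  # serializes the sub-configs too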
| 595 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 695 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
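# --- Added usage sketch (not in the original tests) ---
# Typical use of the decorator in a training script: start big and let it
# halve the batch size on CUDA OOM until the loop succeeds. `dataset` and
# the loop body below are placeholders.
#
#   @find_executable_batch_size(starting_batch_size=256)
#   def train(batch_size):
#       loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
#       for batch in loader:
#           ...  # forward/backward
#
#   train()  # called with no args; the decorator injects batch_size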
| 307 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def compound_interest(
    principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float
) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
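# --- Added worked examples (not in the original file) ---
# simple_interest(500, 0.01, 3)   -> 15.0  (500 * 0.01 * 3)
# compound_interest(100, 0.10, 2) -> ~21.0 (100 * (1.1**2 - 1))
# apr_interest compounds the APR daily:
#   apr_interest(p, r, y) == compound_interest(p, r / 365, y * 365)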
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
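
# Minimal usage sketch for the classes above (the checkpoint name and data_dir are
# hypothetical; assumes the SQuAD json files live under ./squad):
#
#     from transformers import AutoTokenizer
#
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#     print(len(train_dataset), train_dataset[0]["input_ids"].shape)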
| 373 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
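
# Both branches compute the Jaccard index
#     J(A, B) = |A intersection B| / |A union B|
# with alternative_union swapping the denominator for |A| + |B|. For the demo sets
# below, J = |{c, d, e}| / |{a, b, c, d, e, f, h, i}| = 3 / 8 = 0.375.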
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 695 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
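# MAPPING translates fairseq parameter-name fragments to their transformers
# counterparts; the "*" in encoder entries is replaced with the concrete layer
# index when the state dict is walked below.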
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
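
# The linear layer above reuses the embedding's weight data directly, i.e. the
# standard weight-tying trick for output projections.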
def create_vocab_dict(dict_path):
    '''simple docstring'''
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
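
# Example: a fairseq dict.txt whose first two lines were "der 1042" and "und 563"
# would yield {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "der": 4, "und": 5, ...}.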
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers):
    '''simple docstring'''
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=10_224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 334 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 695 | 0 |
'''simple docstring'''
def get_min_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)
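
# e.g. get_avg(10, 21) == 15: the midpoint, truncated towards zero.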
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(_lowerCamelCase : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
    low = lower
    high = higher
    last_numbers = []

    while True:
        number = get_avg(low, high)
        last_numbers.append(number)

        if answer(number) == "low":
            low = number
        elif answer(number) == "high":
            high = number
        else:
            break

    print(F"guess the number : {last_numbers[-1]}")
    print(F"details : {last_numbers!s}")


def main():
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
| 440 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowercase_ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "blip_2_vision_model"

    def __init__(self, hidden_size=1_408, intermediate_size=6_144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.0_0001, attention_dropout=0.0, initializer_range=1E-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipaQFormerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "blip_2_qformer"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1_408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipaConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 695 | 0 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']

        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
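
    # The per-pair floors (26/22/22/29 BLEU) are deliberately loose: only a small batch is
    # scored, so the test flags gross regressions rather than tracking state-of-the-art numbers.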
| 107 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class __lowerCAmelCase ( PretrainedConfig ):
    '''simple docstring'''

    model_type = 'deberta-v2'

    def __init__(self, vocab_size=128_100, hidden_size=1_536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6_144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1E-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class __lowerCAmelCase ( OnnxConfig ):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, num_choices=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40, tokenizer=None):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 695 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError('''Undefined for non-integers''')
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''')

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 42_6880 * Decimal(1_0005).sqrt()
    exponential_term = 1
    linear_term = 1359_1409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 5_4514_0134
        exponential_term *= -26_2537_4126_4076_8000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
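
# The loop above sums the Chudnovsky series
#     1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134 k)
#                            / ((3k)! (k!)^3 640320^(3k + 3/2))
# where 426880 * sqrt(10005) = 640320^(3/2) / 12 and 262537412640768000 = 640320^3;
# each term contributes roughly 14 further correct digits, hence ceil(precision / 14).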
if __name__ == "__main__":
    n = 50
print(f'The first {n} digits of pi is: {pi(n)}')
| 194 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowerCAmelCase__ ) , version.parse(lowerCAmelCase__ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> None:
__a = f'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' , lowerCAmelCase__ ):
__a , __a , __a = requirement, None, None
else:
__a = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
f''' got {requirement}''' )
__a , __a = match[0]
__a = want_full.split(''',''' ) # there could be multiple requirements
__a = {}
for w in want_range:
__a = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
f''' but got {requirement}''' )
__a , __a = match[0]
__a = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__a = '''.'''.join([str(lowerCAmelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return
# check if any version is installed
try:
__a = importlib.metadata.version(lowerCAmelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Optional[Any]:
__a = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowerCAmelCase__ , lowerCAmelCase__ )
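
# Illustrative call sites (package names and pins here are examples, not pinned requirements):
#     require_version("numpy>=1.17", "To fix: pip install -U numpy")
#     require_version_core("tokenizers>=0.10")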
| 695 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    '''simple docstring'''

    task: str = field(default='''language-modeling''', metadata={'''include_in_asdict_even_if_is_default''': True})
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''')})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text"}
| 435 |
"""simple docstring"""
from __future__ import annotations
lowercase_ = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    '''simple docstring'''

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy
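
    # Manhattan distance: a natural heuristic on this 4-connected grid where every move costs 1.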
    def __lt__(self, other):
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    '''simple docstring'''
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ) )
        return successors
    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
| 695 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : List[str] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''tokenizer_file''': {
        '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''gpt-neox-20b''': 2048,
}
class A_ ( PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type") )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
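
    # Left-truncation keeps the most recent conversation turns whenever the running
    # history exceeds the model's maximum input length.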
| 138 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('''Building PyTorch model from configuration: {}'''.format(str(config) ) )
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path) )
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 695 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser('''Transformers CLI tool''', usage='''transformers-cli <command> [<args>]''')
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 255 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase_ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls ):
__a = TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a , repo_id='''test-model-flax''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_a , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
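
# Parameters are compared leaf by leaf on the flattened pytrees; any leaf whose summed
# absolute difference exceeds 1e-4 marks the two models as different.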
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_a )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) )
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_a )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) , max_shard_size='''10KB''' )
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
| 695 | 0 |
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("""Input must be a non-negative integer""")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
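
# Worked example: 13 = 0b1101 -> 0b1100 -> 0b1000 -> 0b0000; three iterations, so 3 set bits.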
if __name__ == "__main__":
import doctest
doctest.testmod()
| 116 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = DownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
def __UpperCAmelCase ( self ):
__a = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetDownsampleBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = CrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SimpleCrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SkipDownBlockaD # noqa F405
__UpperCAmelCase : Tuple = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = AttnSkipDownBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = DownEncoderBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaD # noqa F405
__UpperCAmelCase : Any = 'mid'
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaDCrossAttn # noqa F405
__UpperCAmelCase : str = 'mid'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = UNetMidBlockaDSimpleCrossAttn # noqa F405
__UpperCAmelCase : List[Any] = 'mid'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpBlockaD # noqa F405
__UpperCAmelCase : Union[str, Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetUpsampleBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = CrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a , include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = AttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = SkipUpBlockaD # noqa F405
__UpperCAmelCase : str = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnSkipUpBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpDecoderBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_a )
class __lowerCAmelCase ( UNetBlockTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_a )
| 695 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_transfo_xl"""] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_transfo_xl"""] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
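
# The _LazyModule indirection defers the heavy torch/TF imports until one of the listed
# attributes is actually accessed, which keeps importing the package itself cheap.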
| 595 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase_ = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = 'maskformer'
__UpperCAmelCase : Optional[int] = {'hidden_size': 'mask_feature_size'}
__UpperCAmelCase : Any = ['resnet', 'swin']
__UpperCAmelCase : Dict = ['detr']
def __init__( self , _a = 256 , _a = 256 , _a = 0.1 , _a = False , _a = None , _a = None , _a = 0.02 , _a = 1.0 , _a = 1.0 , _a = 1.0 , _a = 20.0 , _a = None , **_a , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__a = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(_a , _a ):
__a = backbone_config.pop('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_a )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__a = DetrConfig()
else:
# verify that the decoder is supported
__a = (
decoder_config.pop('''model_type''' ) if isinstance(_a , _a ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(_a , _a ):
__a = CONFIG_MAPPING[decoder_type]
__a = config_class.from_dict(_a )
__a = backbone_config
__a = decoder_config
# main feature dimension for the model
__a = fpn_feature_size
__a = mask_feature_size
# initializer
__a = init_std
__a = init_xavier_std
# Hungarian matcher && loss
__a = cross_entropy_weight
__a = dice_weight
__a = mask_weight
__a = use_auxiliary_loss
__a = no_object_weight
__a = output_auxiliary_logits
__a = self.decoder_config.encoder_attention_heads
__a = self.decoder_config.num_hidden_layers
super().__init__(**_a )
@classmethod
def __UpperCAmelCase ( cls , _a , _a , **_a ):
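        # Convenience constructor: build a MaskFormer config from already-instantiated backbone and decoder configs.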
return cls(
backbone_config=_a , decoder_config=_a , **_a , )
def __UpperCAmelCase ( self ):
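        # Serialize to a plain dict, expanding the nested backbone and decoder configs.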
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.decoder_config.to_dict()
__a = self.__class__.model_type
return output
| 695 | 0 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
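# Minimal KwargsHandler dataclass used to exercise `to_kwargs` serialization below.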
@dataclass
class MockClass ( KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Any:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def a__ ( self ) -> int:
        # Check that custom `GradScalerKwargs` are forwarded to the scaler created by the Accelerator.
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def a__ ( self ) -> Optional[int]:
        cmd = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 307 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
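# Scrape Indeed (India) listings for "mobile app development" roles in a given city.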
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def fetch_jobs( location : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
        job_title = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
        company_name = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 695 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE : Any = 'open-llama'
def __init__( self , lowercase_=1_0_0_0_0_0 , lowercase_=4_0_9_6 , lowercase_=1_1_0_0_8 , lowercase_=3_2 , lowercase_=3_2 , lowercase_="silu" , lowercase_=2_0_4_8 , lowercase_=0.0_2 , lowercase_=1E-6 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=False , lowercase_=True , lowercase_=0.1 , lowercase_=0.1 , lowercase_=True , lowercase_=True , lowercase_=None , **lowercase_ , ) -> Tuple:
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = intermediate_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = initializer_range
UpperCAmelCase = rms_norm_eps
UpperCAmelCase = use_cache
UpperCAmelCase = kwargs.pop(
'use_memorry_efficient_attention' , _a )
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_dropout_prob
UpperCAmelCase = use_stable_embedding
UpperCAmelCase = shared_input_output_embedding
UpperCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , tie_word_embeddings=_a , **_a , )
def a_ ( self ) -> Any:
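        # Validate the optional rope_scaling dict: expects {"type": "linear" | "dynamic", "factor": float > 1}.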
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _a ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F"got {self.rope_scaling}" )
UpperCAmelCase = self.rope_scaling.get('type' , _a )
UpperCAmelCase = self.rope_scaling.get('factor' , _a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(_a , _a ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 373 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 'gpt_bigcode'
__UpperCAmelCase : Tuple = ['past_key_values']
__UpperCAmelCase : Dict = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _a=50_257 , _a=1_024 , _a=768 , _a=12 , _a=12 , _a=None , _a="gelu_pytorch_tanh" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1E-5 , _a=0.02 , _a=True , _a=True , _a=50_256 , _a=50_256 , _a=True , _a=True , _a=True , **_a , ):
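        # GPT-2-style hyperparameters plus BigCode extras (multi-query attention, fp32 attention-softmax options).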
__a = vocab_size
__a = n_positions
__a = n_embd
__a = n_layer
__a = n_head
__a = n_inner
__a = activation_function
__a = resid_pdrop
__a = embd_pdrop
__a = attn_pdrop
__a = layer_norm_epsilon
__a = initializer_range
__a = scale_attn_weights
__a = use_cache
__a = attention_softmax_in_fpaa
__a = scale_attention_softmax_in_fpaa
__a = multi_query
__a = bos_token_id
__a = eos_token_id
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
| 695 | 0 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
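    # FiLM-conditioned T5-style decoder: denoises continuous spectrogram frames given noise-time and encoder conditioning.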
@register_to_config
def __init__( self :Tuple ,_UpperCamelCase :int = 1_2_8 ,_UpperCamelCase :Optional[int] = 2_5_6 ,_UpperCamelCase :Union[str, Any] = 20_00.0 ,_UpperCamelCase :str = 7_6_8 ,_UpperCamelCase :Tuple = 1_2 ,_UpperCamelCase :List[Any] = 1_2 ,_UpperCamelCase :Union[str, Any] = 6_4 ,_UpperCamelCase :Dict = 2_0_4_8 ,_UpperCamelCase :Dict = 0.1 ,):
super().__init__()
snake_case_ : Optional[int] = nn.Sequential(
nn.Linear(_a ,d_model * 4 ,bias=_a ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_a ) ,nn.SiLU() ,)
snake_case_ : Dict = nn.Embedding(_a ,_a )
snake_case_ : Dict = False
snake_case_ : Tuple = nn.Linear(_a ,_a ,bias=_a )
snake_case_ : Union[str, Any] = nn.Dropout(p=_a )
snake_case_ : Dict = nn.ModuleList()
for lyr_num in range(_a ):
# FiLM conditional T5 decoder
snake_case_ : Union[str, Any] = DecoderLayer(d_model=_a ,d_kv=_a ,num_heads=_a ,d_ff=_a ,dropout_rate=_a )
self.decoders.append(_a )
snake_case_ : Union[str, Any] = TaLayerNorm(_a )
snake_case_ : List[str] = nn.Dropout(p=_a )
snake_case_ : str = nn.Linear(_a ,_a ,bias=_a )
def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[Any] ):
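        # Cross-attention mask built as the outer product of the query and key padding masks.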
snake_case_ : Any = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def a__ ( self :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ):
snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
snake_case_ : str = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
snake_case_ : int = self.conditioning_emb(_a ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
snake_case_ : Optional[int] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
snake_case_ : Tuple = torch.broadcast_to(
torch.arange(_a ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
snake_case_ : Optional[Any] = self.position_encoding(_a )
snake_case_ : int = self.continuous_inputs_projection(_a )
inputs += position_encodings
snake_case_ : Tuple = self.dropout(_a )
# decoder: No padding present.
snake_case_ : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
snake_case_ : int = [(x, self.encoder_decoder_mask(_a ,_a )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
snake_case_ : str = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
snake_case_ : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
snake_case_ : Tuple = lyr(
_a ,conditioning_emb=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)[0]
snake_case_ : Dict = self.decoder_norm(_a )
snake_case_ : Any = self.post_dropout(_a )
snake_case_ : str = self.spec_out(_a )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self :Union[str, Any] ,_UpperCamelCase :str ,_UpperCamelCase :str ,_UpperCamelCase :Tuple ,_UpperCamelCase :Dict ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :List[str]=1E-6 ):
super().__init__()
snake_case_ : Tuple = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_a ,d_kv=_a ,num_heads=_a ,dropout_rate=_a ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_a ,d_kv=_a ,num_heads=_a ,dropout_rate=_a ,layer_norm_epsilon=_a ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_a ,d_ff=_a ,dropout_rate=_a ,layer_norm_epsilon=_a ) )
def a__ ( self :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[Any]=None ,_UpperCamelCase :List[str]=None ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :List[Any]=None ,):
snake_case_ : Dict = self.layer[0](
_a ,conditioning_emb=_a ,attention_mask=_a ,)
if encoder_hidden_states is not None:
snake_case_ : List[Any] = torch.where(encoder_attention_mask > 0 ,0 ,-1E1_0 ).to(
encoder_hidden_states.dtype )
snake_case_ : Any = self.layer[1](
_a ,key_value_states=_a ,attention_mask=_a ,)
# Apply Film Conditional Feed Forward layer
snake_case_ : Optional[Any] = self.layer[-1](_a ,_a )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Optional[int] ):
super().__init__()
snake_case_ : str = TaLayerNorm(_a )
snake_case_ : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_a )
snake_case_ : int = Attention(query_dim=_a ,heads=_a ,dim_head=_a ,out_bias=_a ,scale_qk=_a )
snake_case_ : str = nn.Dropout(_a )
def a__ ( self :int ,_UpperCamelCase :int ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :List[Any]=None ,):
# pre_self_attention_layer_norm
snake_case_ : str = self.layer_norm(_a )
if conditioning_emb is not None:
snake_case_ : Any = self.FiLMLayer(_a ,_a )
# Self-attention block
snake_case_ : Any = self.attention(_a )
snake_case_ : Union[str, Any] = hidden_states + self.dropout(_a )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self :int ,_UpperCamelCase :Dict ,_UpperCamelCase :Dict ,_UpperCamelCase :Tuple ,_UpperCamelCase :Tuple ,_UpperCamelCase :Tuple ):
super().__init__()
snake_case_ : Union[str, Any] = Attention(query_dim=_a ,heads=_a ,dim_head=_a ,out_bias=_a ,scale_qk=_a )
snake_case_ : List[str] = TaLayerNorm(_a ,eps=_a )
snake_case_ : int = nn.Dropout(_a )
def a__ ( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :int=None ,_UpperCamelCase :str=None ,):
snake_case_ : int = self.layer_norm(_a )
snake_case_ : Optional[int] = self.attention(
_a ,encoder_hidden_states=_a ,attention_mask=attention_mask.squeeze(1 ) ,)
snake_case_ : List[str] = hidden_states + self.dropout(_a )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self :str ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :str ):
super().__init__()
snake_case_ : Dict = TaDenseGatedActDense(d_model=_a ,d_ff=_a ,dropout_rate=_a )
snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 ,out_features=_a )
snake_case_ : Dict = TaLayerNorm(_a ,eps=_a )
snake_case_ : Optional[int] = nn.Dropout(_a )
def a__ ( self :List[str] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :str=None ):
snake_case_ : Optional[Any] = self.layer_norm(_a )
if conditioning_emb is not None:
snake_case_ : List[Any] = self.film(_a ,_a )
snake_case_ : Any = self.DenseReluDense(_a )
snake_case_ : str = hidden_states + self.dropout(_a )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self :str ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Union[str, Any] ):
super().__init__()
snake_case_ : Optional[int] = nn.Linear(_a ,_a ,bias=_a )
snake_case_ : List[Any] = nn.Linear(_a ,_a ,bias=_a )
snake_case_ : List[Any] = nn.Linear(_a ,_a ,bias=_a )
snake_case_ : Any = nn.Dropout(_a )
snake_case_ : int = NewGELUActivation()
def a__ ( self :Tuple ,_UpperCamelCase :Dict ):
snake_case_ : str = self.act(self.wi_a(_a ) )
snake_case_ : str = self.wi_a(_a )
snake_case_ : Any = hidden_gelu * hidden_linear
snake_case_ : Dict = self.dropout(_a )
snake_case_ : Optional[Any] = self.wo(_a )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self :Union[str, Any] ,_UpperCamelCase :Dict ,_UpperCamelCase :Dict=1E-6 ):
super().__init__()
snake_case_ : List[Any] = nn.Parameter(torch.ones(_a ) )
snake_case_ : Dict = eps
def a__ ( self :Any ,_UpperCamelCase :Tuple ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
snake_case_ : str = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_a )
snake_case_ : List[str] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
snake_case_ : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def a__ ( self :int ,_UpperCamelCase :int ):
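        # Tanh approximation of GELU, matching the "new" GELU used by T5/Google implementations.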
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(_a ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self :Dict ,_UpperCamelCase :str ,_UpperCamelCase :str ):
super().__init__()
snake_case_ : List[Any] = nn.Linear(_a ,out_features * 2 ,bias=_a )
def a__ ( self :int ,_UpperCamelCase :Tuple ,_UpperCamelCase :Union[str, Any] ):
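        # FiLM: predict a per-channel (scale, shift) from the conditioning embedding, then modulate x.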
snake_case_ : Union[str, Any] = self.scale_bias(_a )
snake_case_ , snake_case_ : str = torch.chunk(_a ,2 ,-1 )
snake_case_ : List[Any] = x * (1 + scale) + shift
return x | 334 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
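# GLUE/MRPC fine-tuning example used to exercise Accelerate's DeepSpeed integration end to end.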
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 , model_name_or_path : str = "bert-base-cased" ):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
def training_function( config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer: DummyOptim is a placeholder that lets DeepSpeed build the real optimizer
    # from its own config at `prepare` time.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler; DummyScheduler likewise defers to a scheduler defined in the DeepSpeed config.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('''glue''' , '''mrpc''' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions , references = accelerator.gather(
                (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
        # Track per-epoch accuracy for the final JSON report.
        performance_metric[f"epoch-{epoch}"] = eval_metric['''accuracy''']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['''accuracy''']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
            json.dump(performance_metric , f )
def main():
    parser = argparse.ArgumentParser(description='''Simple example of a training script that validates model performance when training with DeepSpeed.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--performance_lower_bound''' , type=float , default=None , help='''Optional lower bound for the performance metric. If set, training will raise an error when the performance metric drops below this value.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=3 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 695 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase = BertTokenizer
__lowercase = BertTokenizerFast
__lowercase = True
__lowercase = True
__lowercase = filter_non_english
def UpperCAmelCase_ ( self :Dict )-> int:
super().setUp()
A__ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :Optional[int] )-> Optional[Any]:
A__ = "UNwant\u00E9d,running"
A__ = "unwanted, running"
return input_text, output_text
def UpperCAmelCase_ ( self :int )-> int:
A__ = self.tokenizer_class(self.vocab_file )
A__ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase_ ( self :Tuple )-> Optional[Any]:
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = "UNwant\u00E9d,running"
A__ = tokenizer.tokenize(_a )
A__ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
A__ = tokenizer.encode(_a , add_special_tokens=_a )
A__ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(_a )
A__ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
A__ = self.get_tokenizer(do_lower_case=_a )
A__ = self.get_rust_tokenizer(do_lower_case=_a )
A__ = "UNwant\u00E9d,running"
A__ = tokenizer.tokenize(_a )
A__ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
A__ = tokenizer.encode(_a , add_special_tokens=_a )
A__ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(_a )
A__ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def UpperCAmelCase_ ( self :List[str] )-> Union[str, Any]:
A__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def UpperCAmelCase_ ( self :str )-> Union[str, Any]:
A__ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCAmelCase_ ( self :Union[str, Any] )-> int:
A__ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def UpperCAmelCase_ ( self :List[str] )-> Optional[int]:
A__ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCAmelCase_ ( self :int )-> Tuple:
A__ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCAmelCase_ ( self :str )-> Union[str, Any]:
A__ = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCAmelCase_ ( self :Tuple )-> str:
A__ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCAmelCase_ ( self :str )-> Tuple:
A__ = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCAmelCase_ ( self :Dict )-> List[Any]:
A__ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def UpperCAmelCase_ ( self :int )-> List[str]:
A__ = BasicTokenizer()
A__ = "a\n\'ll !!to?\'d of, can\'t."
A__ = ["a", "\'", "ll", "!", "!", "to", "?", "\'", "d", "of", ",", "can", "\'", "t", "."]
self.assertListEqual(tokenizer.tokenize(_a ) , _a )
def UpperCAmelCase_ ( self :int )-> Optional[int]:
A__ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
A__ = {}
for i, token in enumerate(_a ):
A__ = i
A__ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def UpperCAmelCase_ ( self :List[str] )-> List[str]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def UpperCAmelCase_ ( self :List[Any] )-> List[str]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def UpperCAmelCase_ ( self :int )-> List[str]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def UpperCAmelCase_ ( self :Optional[Any] )-> Optional[int]:
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def UpperCAmelCase_ ( self :Optional[Any] )-> Optional[int]:
A__ = self.tokenizer_class.from_pretrained("bert-base-uncased" )
A__ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
A__ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
A__ = tokenizer.build_inputs_with_special_tokens(_a )
A__ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCAmelCase_ ( self :Dict )-> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
A__ = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
A__ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
A__ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False
A__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def UpperCAmelCase_ ( self :Dict )-> int:
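        # CJK text: with Chinese-character tokenization enabled (default), each character is its own token with no "##" prefix.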
A__ = ["的", "人", "有"]
A__ = "".join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ = True
A__ = self.tokenizer_class.from_pretrained(_a , **_a )
A__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
A__ = tokenizer_p.encode(_a , add_special_tokens=_a )
A__ = tokenizer_r.encode(_a , add_special_tokens=_a )
A__ = tokenizer_r.convert_ids_to_tokens(_a )
A__ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
A__ = False
A__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
A__ = self.tokenizer_class.from_pretrained(_a , **_a )
A__ = tokenizer_r.encode(_a , add_special_tokens=_a )
A__ = tokenizer_p.encode(_a , add_special_tokens=_a )
A__ = tokenizer_r.convert_ids_to_tokens(_a )
A__ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
A__ = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 440 |
"""simple docstring"""
from typing import Any
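# Viterbi algorithm (dynamic programming over an HMM): recover the most likely hidden-state sequence.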
def viterbi ( observations_space : list , states_space : list , initial_probabilities : dict , transition_probabilities : dict , emission_probabilities : dict , ) -> list:
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities : dict = {}
    pointers : dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''''''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''''''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation ( observations_space : Any , states_space : Any , initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ) -> None:
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty ( observations_space : Any , states_space : Any , initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def _validate_lists ( observations_space : Any , states_space : Any ) -> None:
    _validate_list(observations_space , '''observations_space''' )
    _validate_list(states_space , '''states_space''' )
def _validate_list ( _object : Any , var_name : str ) -> None:
    if not isinstance(_object , list ):
        msg = f'''{var_name} must be a list'''
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = f'''{var_name} must be a list of strings'''
                raise ValueError(msg )
def _validate_dicts ( initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ) -> None:
    _validate_dict(initial_probabilities , '''initial_probabilities''' , float )
    _validate_nested_dict(transition_probabilities , '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities , '''emission_probabilities''' )
def _validate_nested_dict ( _object : Any , var_name : str ) -> None:
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict ( _object : Any , var_name : str , value_type : type , nested : bool = False ) -> None:
    if not isinstance(_object , dict ):
        msg = f'''{var_name} must be a dict'''
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = f'''{var_name} all keys must be strings'''
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = '''nested dictionary ''' if nested else ''''''
        msg = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase : List[str] = '''\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n'''
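# Map requested pixel dimensions onto dimensions the movq VQ decoder can handle (divisible by the scale factor).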
def downscale_height_and_width( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : Any, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Any, UpperCamelCase__ : List[Any], ) -> int:
super().__init__()
self.register_modules(
unet=_a, scheduler=_a, movq=_a, )
_A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : List[Any], UpperCamelCase__ : int, UpperCamelCase__ : Dict, UpperCamelCase__ : Any, UpperCamelCase__ : Tuple ) -> Optional[int]:
if latents is None:
_A = randn_tensor(_a, generator=_a, device=_a, dtype=_a )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_A = latents.to(_a )
_A = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : str=0 ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_A = torch.device(f'cuda:{gpu_id}' )
_A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a, _a )
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : int=0 ) -> str:
if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
_A = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('cpu', silence_dtype_warnings=_a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A , _A = cpu_offload_with_hook(_a, _a, prev_module_hook=_a )
# We'll offload the last model manually.
_A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
if not hasattr(self.unet, '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a, '_hf_hook' )
and hasattr(module._hf_hook, 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self : Optional[Any], UpperCamelCase__ : str, UpperCamelCase__ : Dict, UpperCamelCase__ : Optional[int] = 5_12, UpperCamelCase__ : Tuple = 5_12, UpperCamelCase__ : Tuple = 1_00, UpperCamelCase__ : Tuple = 4.0, UpperCamelCase__ : List[str] = 1, UpperCamelCase__ : List[Any] = None, UpperCamelCase__ : List[Any] = None, UpperCamelCase__ : str = "pil", UpperCamelCase__ : Optional[int] = True, ) -> Dict:
_A = self._execution_device
_A = guidance_scale > 1.0
if isinstance(_a, _a ):
_A = torch.cat(_a, dim=0 )
_A = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_a, _a ):
_A = torch.cat(_a, dim=0 )
if do_classifier_free_guidance:
_A = image_embeds.repeat_interleave(_a, dim=0 )
_A = negative_image_embeds.repeat_interleave(_a, dim=0 )
_A = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=_a )
self.scheduler.set_timesteps(_a, device=_a )
_A = self.scheduler.timesteps
_A = self.unet.config.in_channels
_A , _A = downscale_height_and_width(_a, _a, self.movq_scale_factor )
# create initial latent
_A = self.prepare_latents(
(batch_size, num_channels_latents, height, width), image_embeds.dtype, _a, _a, _a, self.scheduler, )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = {'image_embeds': image_embeds}
_A = self.unet(
sample=_a, timestep=_a, encoder_hidden_states=_a, added_cond_kwargs=_a, return_dict=_a, )[0]
if do_classifier_free_guidance:
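                # Classifier-free guidance: split the stacked (uncond, cond) predictions and blend with guidance_scale.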
_A , _A = noise_pred.split(latents.shape[1], dim=1 )
_A , _A = noise_pred.chunk(2 )
_A , _A = variance_pred.chunk(2 )
_A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A = torch.cat([noise_pred, variance_pred_text], dim=1 )
if not (
hasattr(self.scheduler.config, 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A , _A = noise_pred.split(latents.shape[1], dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(
_a, _a, _a, generator=_a, )[0]
# post-processing
_A = self.movq.decode(_a, force_not_quantize=_a )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_A = image * 0.5 + 0.5
_A = image.clamp(0, 1 )
_A = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
_A = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 107 |
"""simple docstring"""
import math
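# Project Euler problem 58 (spiral primes): find the side length at which the prime ratio on the spiral diagonals first drops below the given ratio.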
def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( ratio : float = 0.1 ) -> int:
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        # The bottom-right corner (j + 2)**2 is a perfect square and never prime, so it is skipped.
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
a_ : Optional[int] = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
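# Rewrite T5X/Flax parameter paths into the HF SwitchTransformers naming scheme before loading.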
def rename_keys( s_dict ):
    # 1. In HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = R'''.*/layers_(\d+)'''
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , key )
        layer_to_block_of_layer = R'''(encoder|decoder)\/'''
        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , new_key )
                new_key = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , new_key )
                new_key = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f"{key} -> {new_key}" )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts ):
                # Split the stacked expert weights onto per-expert keys. The exact
                # "expert_{idx}" naming below is an assumption; the original string was garbled.
                new_key = key.replace('''expert/''' , f"expert_{idx}/" )
                s_dict[new_key] = expert_weihts[idx]
                print(f"{key} -> {new_key}" )
            s_dict.pop(key )
    return s_dict
a_ : List[Any] = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config( gin_file , num_experts ):
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re
    with open(gin_file , '''r''' ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R'''(.*) = ([0-9.]*)''' , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '''.''' in value else int(value )
    activation = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args['''num_experts'''] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}" )
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params , sep='''/''' )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep='''/''' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
a_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
a_ : Dict = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 194 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 695 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__ = ['pixel_values']
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) ->None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , crop_pct : float , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) ->BatchFeature:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
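

# Minimal usage sketch (assumes Pillow is installed; the generated class name is
# kept as-is). With the default ``shortest_edge`` of 384 the image is warped
# straight to 384x384; below 384 it is resized via ``crop_pct`` and center-cropped.
#   from PIL import Image
#   processor = SCREAMING_SNAKE_CASE(size={"shortest_edge": 384})
#   batch = processor(images=Image.new("RGB", (500, 300)), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 384, 384)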
| 435 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class __lowerCAmelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'dpr'
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim=0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
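

# Quick usage sketch (keeps the generated class name above; ``projection_dim=0``
# means no extra projection is applied on top of the encoder output):
#   config = __lowerCAmelCase(projection_dim=128)
#   config.projection_dim  # -> 128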
| 695 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=3 , A_=2_24 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ):
_UpperCamelCase = size if size is not None else {"height": 18, "width": 18}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean
_UpperCamelCase = image_std
def a ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ):
return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ):
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , "image_mean" ) )
self.assertTrue(hasattr(_a , "image_std" ) )
self.assertTrue(hasattr(_a , "do_normalize" ) )
self.assertTrue(hasattr(_a , "do_resize" ) )
self.assertTrue(hasattr(_a , "size" ) )
    def test_batch_feature( self ):
pass
    def test_call_pil( self ):
# Initialize image_processor
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_UpperCamelCase = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCamelCase = image_processor(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
    def test_call_numpy( self ):
# Initialize image_processor
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCamelCase = image_processor(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
    def test_call_pytorch( self ):
# Initialize image_processor
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCamelCase = image_processor(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 138 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def __UpperCAmelCase ( self ):
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_a , )
__a = PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
__a = CLIPTextModel(_a )
__a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __UpperCAmelCase ( self , _a , _a=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((64, 64) )
__a = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_a ).startswith('''mps''' ):
__a = torch.manual_seed(_a )
else:
__a = torch.Generator(device=_a ).manual_seed(_a )
__a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCAmelCase ( self ):
__a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableDiffusionInpaintPipeline(**_a )
__a = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
__a = self.get_dummy_inputs(_a )
__a = sd_pipe(**_a ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = StableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __UpperCAmelCase ( self ):
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = StableDiffusionInpaintPipeline.from_pretrained(
_a , torch_dtype=torch.floataa , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __UpperCAmelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = PNDMScheduler.from_pretrained(_a , subfolder='''scheduler''' )
__a = StableDiffusionInpaintPipeline.from_pretrained(
_a , safety_checker=_a , scheduler=_a , torch_dtype=torch.floataa , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , num_inference_steps=2 , output_type='''np''' , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 695 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
UpperCAmelCase_ : Tuple = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(_a )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
_SCREAMING_SNAKE_CASE =FlaxBertModel(_a )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
_SCREAMING_SNAKE_CASE =flatten_dict(unfreeze(model.params ) )
_SCREAMING_SNAKE_CASE =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_SCREAMING_SNAKE_CASE =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a , repo_id='''test-model-flax''' , push_to_hub=_a , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
_SCREAMING_SNAKE_CASE =flatten_dict(unfreeze(model.params ) )
_SCREAMING_SNAKE_CASE =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_SCREAMING_SNAKE_CASE =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f"""{key} not identical""" )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
_SCREAMING_SNAKE_CASE =FlaxBertModel(_a )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
_SCREAMING_SNAKE_CASE =flatten_dict(unfreeze(model.params ) )
_SCREAMING_SNAKE_CASE =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_SCREAMING_SNAKE_CASE =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_a , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_a , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
_SCREAMING_SNAKE_CASE =flatten_dict(unfreeze(model.params ) )
_SCREAMING_SNAKE_CASE =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_SCREAMING_SNAKE_CASE =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f"""{key} not identical""" )
def check_models_equal(modela , modelb ) -> bool:
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params )
    flat_params_b = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
_SCREAMING_SNAKE_CASE =FlaxBertModel(_a )
_SCREAMING_SNAKE_CASE ='''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) )
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained(_a )
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
_SCREAMING_SNAKE_CASE =FlaxBertModel(_a )
_SCREAMING_SNAKE_CASE ='''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) , max_shard_size='''10KB''' )
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained(_a )
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='''bert'''
_SCREAMING_SNAKE_CASE ='''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained(_a )
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='''bert'''
_SCREAMING_SNAKE_CASE ='''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained(_a )
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
| 255 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass ( KwargsHandler ):
    '''simple docstring'''
    a : int = 0
    b : bool = False
    c : float = 3.0
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCAmelCase ( self ):
        # Check that custom GradScaler kwargs are passed through to the underlying scaler.
__a = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
AcceleratorState._reset_state()
__a = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def __UpperCAmelCase ( self ):
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_0_0, 2_0_0)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
__version__ = """2020.9.26"""
__author__ = """xcodz-dot, cclaus, dhruvmanila"""
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    if not all(isinstance(val, (float, int) ) for val in locals().values() ):
        msg = f'''Input values must either be float or int: {list(locals().values() )}'''
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
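

# Worked example of the perspective projection above:
#   convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
#   returns (7.6923076923076925, 15.384615384615385).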
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    if not isinstance(axis, str ):
        raise TypeError("""Axis must be a str""" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int) ) for val in input_variables.values() ):
        msg = (
            """Input values except axis must either be float or int: """
            f'''{list(input_variables.values() )}'''
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError("""not a valid axis, choose one of \'x\', \'y\', \'z\'""" )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(F"""{convert_to_2d(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }""")
| 116 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = inspect.getfile(accelerate.test_utils )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
__a = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __UpperCAmelCase ( self ):
__a = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
__a = [sys.executable] + distributed_args
execute_subprocess_async(_a , env=os.environ.copy() )
| 695 | 0 |
"""simple docstring"""
MOD_ADLER = 6_5_5_2_1


def adler32(plain_text: str ) -> int:
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
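

if __name__ == "__main__":
    # Added sanity check using the well-known Adler-32 test vector:
    # the checksum of "Wikipedia" is 0x11E60398.
    assert adler32("Wikipedia" ) == 0x11E60398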
| 595 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def __UpperCAmelCase ( self ):
super().setUp()
__a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self , _a ):
__a = '''UNwant\u00E9d,running'''
__a = '''unwanted, running'''
return input_text, output_text
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class(self.vocab_file )
__a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __UpperCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = '''UNwant\u00E9d,running'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
__a = self.get_tokenizer(do_lower_case=_a )
__a = self.get_rust_tokenizer(do_lower_case=_a )
__a = '''UNwant\u00E9d,running'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
__a = '''a\n\'ll !!to?\'d of, can\'t.'''
__a = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__a = {}
for i, token in enumerate(_a ):
__a = i
__a = WordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__a = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
__a = tokenizer_r.do_lower_case if hasattr(_a , '''do_lower_case''' ) else False
__a = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __UpperCAmelCase ( self ):
__a = ['''的''', '''人''', '''有''']
__a = ''''''.join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = True
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
__a = False
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
__a = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 695 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig ( PretrainedConfig ):
    model_type = 'bridgetower_vision_model'
def __init__( self , _a=768 , _a=12 , _a=3 , _a=16 , _a=288 , _a=1 , _a=1e-05 , _a=False , _a=True , _a=False , **_a , ) -> Any:
super().__init__(**_a )
_A : List[Any] = hidden_size
_A : List[Any] = num_hidden_layers
_A : Tuple = num_channels
_A : Optional[int] = patch_size
_A : List[Any] = image_size
_A : Optional[Any] = initializer_factor
_A : Union[str, Any] = layer_norm_eps
_A : List[str] = stop_gradient
_A : Optional[int] = share_layernorm
_A : List[str] = remove_last_layer
@classmethod
def a__ ( cls , _a , **_a ) -> Optional[int]:
_A , _A : Tuple = cls.get_config_dict(_a , **_a )
if config_dict.get("""model_type""" ) == "bridgetower":
            _A : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class BridgeTowerTextConfig ( PretrainedConfig ):
    model_type = 'bridgetower_text_model'
def __init__( self , _a=5_0265 , _a=768 , _a=12 , _a=12 , _a=1 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=514 , _a=1 , _a=1e-05 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , **_a , ) -> Tuple:
super().__init__(**_a )
_A : Optional[Any] = vocab_size
_A : Any = hidden_size
_A : Any = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : List[Any] = hidden_act
_A : List[Any] = initializer_factor
_A : str = intermediate_size
_A : Optional[Any] = hidden_dropout_prob
_A : Optional[int] = attention_probs_dropout_prob
_A : List[Any] = max_position_embeddings
_A : Union[str, Any] = type_vocab_size
_A : Any = layer_norm_eps
_A : Dict = position_embedding_type
_A : Tuple = use_cache
_A : Optional[Any] = pad_token_id
_A : List[Any] = bos_token_id
_A : Optional[Any] = eos_token_id
@classmethod
def a__ ( cls , _a , **_a ) -> Optional[int]:
_A , _A : Optional[int] = cls.get_config_dict(_a , **_a )
if config_dict.get("""model_type""" ) == "bridgetower":
_A : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class BridgeTowerConfig ( PretrainedConfig ):
    model_type = 'bridgetower'
def __init__( self , _a=True , _a="gelu" , _a=768 , _a=1 , _a=1e-05 , _a=False , _a="add" , _a=12 , _a=6 , _a=False , _a=False , _a=None , _a=None , **_a , ) -> Union[str, Any]:
# TODO: remove this once the Hub files are updated.
_A : int = kwargs.pop("""text_config_dict""" , _a )
_A : Any = kwargs.pop("""vision_config_dict""" , _a )
super().__init__(**_a )
_A : Optional[int] = share_cross_modal_transformer_layers
_A : Union[str, Any] = hidden_act
_A : Optional[int] = hidden_size
_A : Union[str, Any] = initializer_factor
_A : str = layer_norm_eps
_A : str = share_link_tower_layers
_A : Tuple = link_tower_type
_A : Union[str, Any] = num_attention_heads
_A : Dict = num_hidden_layers
_A : Optional[int] = tie_word_embeddings
_A : Union[str, Any] = init_layernorm_from_vision_encoder
if text_config is None:
_A : Optional[int] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_A : Any = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_A : Tuple = BridgeTowerTextConfig(**_a )
_A : Optional[Any] = BridgeTowerVisionConfig(**_a )
@classmethod
def a__ ( cls , _a , _a , **_a ) -> Any:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a )
def a__ ( self ) -> Dict:
_A : Tuple = copy.deepcopy(self.__dict__ )
_A : Any = self.text_config.to_dict()
_A : Tuple = self.vision_config.to_dict()
_A : List[Any] = self.__class__.model_type
return output
| 307 |
"""simple docstring"""
from __future__ import annotations
def simple_interest ( principal : float , daily_interest_rate : float , days_between_payments : float ) -> float:
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def compound_interest ( principal : float , nominal_annual_interest_rate_percentage : float , number_of_compounding_periods : float , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest ( principal : float , nominal_annual_percentage_rate : float , number_of_years : float , ) -> float:
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
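    # Illustrative figures (hypothetical loan): a 5% nominal APR on a 10_000
    # principal, compounded daily over one year, earns roughly 512.67 in interest.
    print(apr_interest(10_000, 0.05, 1))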
| 695 | 0 |
"""simple docstring"""
def gnome_sort ( lst : list ) -> list:
    """simple docstring"""
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
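

# Worked example: gnome_sort([34, 2, 10, -9]) -> [-9, 2, 10, 34]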
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(gnome_sort(unsorted))
| 373 |
"""simple docstring"""
def jaccard_similarity ( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 695 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__A : int = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class __UpperCamelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
def __init__( self :Optional[Any] ,_UpperCamelCase :Tuple=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Dict=None ,_UpperCamelCase :Union[str, Any]="<s>" ,_UpperCamelCase :Any="<unk>" ,_UpperCamelCase :Optional[int]="<pad>" ,_UpperCamelCase :Union[str, Any]="<mask>" ,_UpperCamelCase :str="</s>" ,**_UpperCamelCase :Tuple ,):
super().__init__(
_a ,_a ,tokenizer_file=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,sep_token=_a ,**_a ,)
def a__ ( self :Union[str, Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :List[Any] = None ):
snake_case_ : Union[str, Any] = [self.cls_token_id]
snake_case_ : List[str] = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
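        # With HerBERT's defaults this yields `<s> A </s>` for a single sequence
        # and `<s> A </s> B </s>` for a pair.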
def a__ ( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] = None ,_UpperCamelCase :List[Any] = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def a__ ( self :Optional[int] ,_UpperCamelCase :Tuple ,_UpperCamelCase :str = None ):
snake_case_ : Optional[int] = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self :Union[str, Any] ,_UpperCamelCase :Dict ,_UpperCamelCase :Optional[int] = None ):
snake_case_ : Dict = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a ) | 334 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story ( story_id : str ) -> dict:
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url ).json()


def hackernews_top_stories ( max_stories : int = 10 ) -> list[dict]:
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]


def hackernews_top_stories_as_markdown ( max_stories : int = 10 ) -> str:
    stories = hackernews_top_stories(max_stories )
    return "\n".join('''* [{title}]({url})'''.format(**story ) for story in stories )
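

# Illustrative output of hackernews_top_stories_as_markdown(2); real titles and
# URLs depend on whatever the live API returns at call time:
#   * [Example story one](https://example.com/1)
#   * [Example story two](https://example.com/2)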
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 695 | 0 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
def __init__( self :Dict , lowercase_ :str , lowercase_ :Optional[Any]=99 , lowercase_ :int=13 , lowercase_ :List[Any]=16 , lowercase_ :List[Any]=7 , lowercase_ :Union[str, Any]=True , lowercase_ :Tuple=True , lowercase_ :str=True , lowercase_ :Optional[Any]=False , lowercase_ :Any=True , lowercase_ :Union[str, Any]=2 , lowercase_ :Optional[int]=32 , lowercase_ :List[str]=4 , lowercase_ :Tuple=4 , lowercase_ :Tuple=30 , lowercase_ :Union[str, Any]=0 , lowercase_ :Optional[Any]=1 , lowercase_ :List[Any]=2 , lowercase_ :str=None , )-> int:
A__ = parent
A__ = batch_size
A__ = decoder_seq_length
# For common tests
A__ = self.decoder_seq_length
A__ = is_training
A__ = use_attention_mask
A__ = use_labels
A__ = vocab_size
A__ = d_model
A__ = d_model
A__ = decoder_layers
A__ = decoder_layers
A__ = decoder_ffn_dim
A__ = decoder_attention_heads
A__ = decoder_attention_heads
A__ = eos_token_id
A__ = bos_token_id
A__ = pad_token_id
A__ = decoder_start_token_id
A__ = use_cache
A__ = max_position_embeddings
A__ = None
A__ = decoder_seq_length
A__ = 2
A__ = 1
def UpperCAmelCase_ ( self :int )-> Union[str, Any]:
A__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
A__ = None
if self.use_attention_mask:
A__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
A__ = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def UpperCAmelCase_ ( self :int , lowercase_ :int , lowercase_ :Any , lowercase_ :Tuple , lowercase_ :List[str] , )-> str:
A__ = True
A__ = TrOCRDecoder(config=_a ).to(_a ).eval()
A__ = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
A__ = model(_a , use_cache=_a )
A__ = model(_a )
A__ = model(_a , use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
A__ = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ = model(_a )["last_hidden_state"]
A__ = model(_a , past_key_values=_a )["last_hidden_state"]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
A__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_a , _a , atol=1E-3 )
def UpperCAmelCase_ ( self :Dict )-> Dict:
A__ = self.prepare_config_and_inputs()
A__, A__, A__, A__ = config_and_inputs
A__ = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
def UpperCAmelCase_ ( self :Any )-> Any:
A__ = TrOCRStandaloneDecoderModelTester(self , is_training=_a )
A__ = ConfigTester(self , config_class=_a )
def UpperCAmelCase_ ( self :Any )-> Union[str, Any]:
pass
def UpperCAmelCase_ ( self :str )-> List[str]:
pass
def UpperCAmelCase_ ( self :str )-> List[Any]:
pass
def UpperCAmelCase_ ( self :Any )-> Any:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self :int )-> Optional[int]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_a )
def UpperCAmelCase_ ( self :Optional[int] )-> List[Any]:
return
@unittest.skip("The model doesn\'t support left padding" ) # and it's not used enough to be worth fixing :)
def UpperCAmelCase_ ( self :str )-> Tuple:
pass
| 440 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
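
# Usage sketch (illustrative; assumes an installed `transformers` that exposes
# these classes, which is not shown in this file):
#
#     from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
#     from transformers.models.auto.configuration_auto import CONFIG_MAPPING
#
#     config = Blip2Config.from_vision_qformer_text_configs(
#         vision_config=Blip2VisionConfig(),
#         qformer_config=Blip2QFormerConfig(),
#         text_config=CONFIG_MAPPING["opt"](),
#     )
#     assert config.to_dict()["model_type"] == "blip-2"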
| 695 | 0 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
], )
@slow
def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : Optional[int] ) -> List[Any]:
_A = AutoTokenizer.from_pretrained(_a )
_A = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_A = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
_A = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
_A = 4
_A = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_A , _A = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
_A = SeqaSeqDataset(
_a, data_dir=_a, type_path='train', max_source_length=_a, max_target_length=_a, src_lang=_a, tgt_lang=_a, )
_A = DataLoader(_a, batch_size=2, collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_a, _a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_A = shift_tokens_right(batch['labels'], tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : List[str] ) -> Union[str, Any]:
_A = AutoTokenizer.from_pretrained(_a )
_A = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_A = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
_A = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
_A = 4
_A = LegacySeqaSeqDataset(
_a, data_dir=_a, type_path='train', max_source_length=20, max_target_length=_a, )
_A = DataLoader(_a, batch_size=2, collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self : int ) -> Any:
_A = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_A = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_A = tmp_dir.joinpath('train.source' ).open().readlines()
_A = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a, _a, 1_28, _a )
_A = {x.name for x in tmp_dir.iterdir()}
_A = {x.name for x in save_dir.iterdir()}
_A = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason='This test requires fairseq' )
def __UpperCAmelCase ( self : List[str] ) -> int:
if not FAIRSEQ_AVAILABLE:
return
_A , _A , _A = self._get_dataset(max_len=64 )
_A = 64
_A = ds.make_dynamic_sampler(_a, required_batch_size_multiple=_a )
_A = [len(_a ) for x in batch_sampler]
assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_a ) == len(_a ) # no dropped or added examples
_A = DataLoader(_a, batch_sampler=_a, collate_fn=ds.collate_fn, num_workers=2 )
_A = []
_A = []
for batch in data_loader:
_A = batch['input_ids'].shape
_A = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_A = np.product(batch['input_ids'].shape )
num_src_per_batch.append(_a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_a )
assert num_src_per_batch[0] == max(_a )
if failures:
raise AssertionError(f'too many tokens in {len(_a )} batches' )
def __UpperCAmelCase ( self : Any ) -> int:
_A , _A , _A = self._get_dataset(max_len=5_12 )
_A = 2
_A = ds.make_sortish_sampler(_a, shuffle=_a )
_A = DataLoader(_a, batch_size=_a, collate_fn=ds.collate_fn, num_workers=2 )
_A = DataLoader(_a, batch_size=_a, collate_fn=ds.collate_fn, num_workers=2, sampler=_a )
_A = tokenizer.pad_token_id
def count_pad_tokens(UpperCamelCase__ : List[Any], UpperCamelCase__ : List[str]="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a, k='labels' ) ) < sum(count_pad_tokens(_a, k='labels' ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
def __UpperCAmelCase ( self : int, UpperCamelCase__ : Dict=10_00, UpperCamelCase__ : Union[str, Any]=1_28 ) -> List[Any]:
if os.getenv('USE_REAL_DATA', _a ):
_A = 'examples/seq2seq/wmt_en_ro'
_A = max_len * 2 * 64
if not Path(_a ).joinpath('train.len' ).exists():
save_len_file(_a, _a )
else:
_A = 'examples/seq2seq/test_data/wmt_en_ro'
_A = max_len * 4
save_len_file(_a, _a )
_A = AutoTokenizer.from_pretrained(_a )
_A = SeqaSeqDataset(
_a, data_dir=_a, type_path='train', max_source_length=_a, max_target_length=_a, n_obs=_a, )
return ds, max_tokens, tokenizer
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
_A , _A , _A = self._get_dataset()
_A = set(DistributedSortishSampler(_a, 2_56, num_replicas=2, rank=0, add_extra_examples=_a ) )
_A = set(DistributedSortishSampler(_a, 2_56, num_replicas=2, rank=1, add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
], )
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : List[str] ) -> Tuple:
_A = AutoTokenizer.from_pretrained(_a, use_fast=_a )
if tok_name == MBART_TINY:
_A = SeqaSeqDataset(
_a, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ), type_path='train', max_source_length=4, max_target_length=8, src_lang='EN', tgt_lang='FR', )
_A = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_A = SeqaSeqDataset(
_a, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ), type_path='train', max_source_length=4, max_target_length=8, )
_A = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
| 107 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
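
# Usage sketch (illustrative; assumes an installed `transformers` exposing
# DebertaV2Config):
#
#     from transformers import DebertaV2Config
#
#     config = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
#     assert config.model_type == "deberta-v2"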
| 695 | 0 |
from timeit import timeit
test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
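
    # Extra sanity sketch (illustrative): all four implementations agree on a
    # phrase that is a palindrome once spaces are removed.
    sample = "Was it a car or a cat I saw".replace(" ", "").lower()
    assert (
        is_palindrome(sample)
        and is_palindrome_traversal(sample)
        and is_palindrome_recursive(sample)
        and is_palindrome_slice(sample)
    )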
| 194 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowerCAmelCase__ ) , version.parse(lowerCAmelCase__ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> None:
__a = f'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' , lowerCAmelCase__ ):
__a , __a , __a = requirement, None, None
else:
__a = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
f''' got {requirement}''' )
__a , __a = match[0]
__a = want_full.split(''',''' ) # there could be multiple requirements
__a = {}
for w in want_range:
__a = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
f''' but got {requirement}''' )
__a , __a = match[0]
__a = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__a = '''.'''.join([str(lowerCAmelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return
# check if any version is installed
try:
__a = importlib.metadata.version(lowerCAmelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Optional[Any]:
__a = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowerCAmelCase__ , lowerCAmelCase__ )
| 695 | 0 |
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()

    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
| 435 |
"""simple docstring"""
from __future__ import annotations
lowercase_ = list[tuple[int, int]]
lowercase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , ):
__a = pos_x
__a = pos_y
__a = (pos_y, pos_x)
__a = goal_x
__a = goal_y
__a = g_cost
__a = parent
__a = self.calculate_heuristic()
def __UpperCAmelCase ( self ):
__a = abs(self.pos_x - self.goal_x )
__a = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _a ):
return self.f_cost < other.f_cost
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a ):
__a = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a )
__a = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , _a )
__a = [self.start]
__a = []
__a = False
def __UpperCAmelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__a = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__a = True
return self.retrace_path(_a )
self.closed_nodes.append(_a )
__a = self.get_successors(_a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_a )
else:
# retrieve the best current path
__a = self.open_nodes.pop(self.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_a )
else:
self.open_nodes.append(_a )
if not self.reached:
return [self.start.pos]
return None
def __UpperCAmelCase ( self , _a ):
__a = []
for action in delta:
__a = parent.pos_x + action[1]
__a = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) )
return successors
def __UpperCAmelCase ( self , _a ):
__a = node
__a = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__a = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowercase_ = (0, 0)
lowercase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
lowercase_ = GreedyBestFirst(init, goal)
lowercase_ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase_ = 2
for elem in grid:
print(elem)
| 695 | 0 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 138 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
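
# Example invocation (the paths below are placeholders, not real files):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin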
| 695 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
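
# With the lazy module in place, `import transformers` stays cheap: the heavy
# modeling file is only imported when one of its names is first accessed.
# Illustrative sketch ("facebook/m2m100_418M" is the public checkpoint):
#
#     from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
#     model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")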
| 255 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase_ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls ):
__a = TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a , repo_id='''test-model-flax''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_a , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_a )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) )
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_a )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) , max_shard_size='''10KB''' )
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
| 695 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 116 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = DownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
def __UpperCAmelCase ( self ):
__a = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetDownsampleBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = CrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SimpleCrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SkipDownBlockaD # noqa F405
__UpperCAmelCase : Tuple = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = AttnSkipDownBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = DownEncoderBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaD # noqa F405
__UpperCAmelCase : Any = 'mid'
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaDCrossAttn # noqa F405
__UpperCAmelCase : str = 'mid'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = UNetMidBlockaDSimpleCrossAttn # noqa F405
__UpperCAmelCase : List[Any] = 'mid'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpBlockaD # noqa F405
__UpperCAmelCase : Union[str, Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetUpsampleBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = CrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a , include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = AttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = SkipUpBlockaD # noqa F405
__UpperCAmelCase : str = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnSkipUpBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpDecoderBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_a )
| 695 | 0 |
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """
    Calculate the numeric solution of y' = f(x, y) at each step with the
    classic fourth-order Runge-Kutta method, starting from y(x0) = y0.
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
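

# Usage sketch (illustrative): integrate y' = y from x = 0 to x = 3 with step
# h = 0.025. The exact solution is e^x, so the final sample should be close to
# e^3; the tolerance is an assumption chosen for this demo.
if __name__ == "__main__":
    estimate = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.025, 3.0)[-1]
    print(f"RK4 estimate of e^3: {estimate:.5f} (exact {np.exp(3.0):.5f})")
    assert abs(estimate - np.exp(3.0)) < 1e-2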
| 595 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
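
# Usage sketch (illustrative; assumes an installed `transformers` exposing
# these classes):
#
#     from transformers import DetrConfig, MaskFormerConfig, SwinConfig
#
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"]),
#         decoder_config=DetrConfig(),
#     )
#     assert config.to_dict()["model_type"] == "maskformer"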
| 695 | 0 |
import torch
from diffusers import StableDiffusionPipeline
_snake_case = "path-to-your-trained-model"
_snake_case = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
_snake_case = "A photo of sks dog in a bucket"
_snake_case = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 307 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 695 | 0 |
"""simple docstring"""
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
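
# Illustrative usage sketch (assumptions: a CUDA GPU, bitsandbytes>=0.39.0 installed, and
# "facebook/opt-350m" is just an example model id). Loads a causal LM in 4-bit NF4 with
# double quantization and bfloat16 compute.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quantization_config)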
| 373 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
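
# Illustrative usage sketch (a minimal example, not part of the file above): instantiate a
# randomly initialized GPTBigCode model from the default configuration.
from transformers import GPTBigCodeConfig, GPTBigCodeModel

configuration = GPTBigCodeConfig()
model = GPTBigCodeModel(configuration)
print(model.config.n_layer)  # 12 with the defaults above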
| 695 | 0 |
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
    from .pipelines import MidiProcessor
| 334 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
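
# Illustrative launch sketch (file names are assumptions): this script is meant to be run
# through the Accelerate CLI with a DeepSpeed config, e.g.
#   accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#       --model_name_or_path bert-base-cased --performance_lower_bound 0.80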
| 695 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config


def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            # Split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs).logits

    # Sanity check on the forward pass
    print(outputs.shape)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
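
# Illustrative invocation sketch (the checkpoint path is an assumption):
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path /path/to/simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-simmim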
| 440 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
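
# Illustrative usage sketch for viterbi() above: the classic two-state weather HMM
# (state/observation names and probabilities are toy example values).
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "fever"]
    start_p = {"healthy": 0.6, "fever": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "fever": 0.3},
        "fever": {"healthy": 0.4, "fever": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # expected: ['healthy', 'healthy', 'fever']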
| 695 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    """simple docstring"""

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
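
# Running note (sketch; the test file path is an assumption): the fast test above runs on
# CPU in seconds, while the @slow integration test downloads the real checkpoint, e.g.
#   pytest tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
#   RUN_SLOW=1 pytest ...   # to include the integration test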
| 107 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
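
# Worked example (sketch): solution(0.1) keeps growing the number spiral while the share of
# primes on its diagonals stays at or above 10%; for the classic Project Euler 58 statement
# this returns a side length of 26241.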
| 695 | 0 |
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 194 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
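
# Illustrative import sketch (the module path is an assumption based on the `....utils`
# relative import above, which places this package under transformers/models/deprecated):
#   from transformers.models.deprecated.mctct import MCTCTConfig, MCTCTProcessor
# Thanks to the lazy module, nothing heavy is imported until a symbol is actually accessed.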
| 695 | 0 |