code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """Modified DDPM scheduler used in the UnCLIP model."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
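
# --- Usage sketch (not part of the original file) ---
# A minimal denoising loop, assuming a hypothetical `unet` noise-prediction
# model; real pipelines also pass `prev_timestep` explicitly to `step` when
# running fewer inference steps than training steps.
#
#     scheduler = UnCLIPScheduler()
#     scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = unet(sample, t)  # hypothetical epsilon prediction
#         sample = scheduler.step(model_output, t, sample).prev_sample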
| 713 |
"""Rotate a square matrix by 90, 180 or 270 degrees counterclockwise."""
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 7 | 0 |
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 714 |
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 0 |
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the main domain name (e.g. github.com) of the given URL."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the sub-domain name (network location) of the given URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Scrape linked pages for e-mail addresses belonging to the URL's domain."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 715 |
import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Set the random seed for numpy and torch (and CUDA, if used)."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
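
# --- Usage sketch (not part of the original file) ---
# Illustrative single-process CPU setup; the attribute names mirror what
# `init_gpu_params` and `set_seed` read above.
#
#     from types import SimpleNamespace
#     params = SimpleNamespace(n_gpu=0, seed=42)
#     init_gpu_params(params)  # CPU branch: local_rank=0, is_master=True, multi_gpu=False
#     set_seed(params)         # seeds numpy and torch (and CUDA when n_gpu > 0)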
| 7 | 0 |
def permute(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations of the list, recursively.

    >>> permute([1, 2, 3])
    [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations of the list using backtracking.

    >>> permute2([1, 2, 3])
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    """

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
| 716 |
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative int value found in `env_keys`, else the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Return the truthiness of `key` in the environment, else the default."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
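
# --- Usage sketch (not part of the original file); the env var names are made up.
#
#     os.environ["MYAPP_NUM_WORKERS"] = "4"
#     os.environ["MYAPP_VERBOSE"] = "yes"
#
#     get_int_from_env(["MYAPP_NUM_WORKERS", "NUM_WORKERS"], 1)  # -> 4 (first key that is set)
#     parse_flag_from_env("MYAPP_VERBOSE")                       # -> True ("yes" is truthy)
#     parse_choice_from_env("MYAPP_MODE")                        # -> "no" (falls back to default)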
| 7 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the dataset and save to save_dir/rank_{local_rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
        # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    """Wait for all replicas' rank_*.json files, then load them."""
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
        # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
| 717 |
from typing import Dict, Optional

import numpy as np

import datasets


_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n"

_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"


def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union for a single (prediction, label) pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union over all (prediction, label) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate mean Intersection-over-Union (mIoU) and related accuracy metrics."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 7 | 0 |
import re


def dna(dna: str) -> str:
    """
    Return the complementary strand of a DNA strand.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("GTCATGCAx")
    Traceback (most recent call last):
        ...
    ValueError: Invalid Strand
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 718 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
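
# --- Usage sketch (not part of the original file) ---
# Instantiating the config directly; unspecified fields keep their defaults.
#
#     config = ViTConfig(image_size=384, patch_size=32)
#     print(config.hidden_size)  # 768 (default)
#     print(config.image_size)   # 384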
| 7 | 0 |
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
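
# --- Usage sketch (not part of the original file) ---
# `attribute_map` lets the HF-standard names alias the GPT-2-style ones.
#
#     config = GPTBigCodeConfig(n_embd=2048, n_layer=24)
#     print(config.hidden_size)        # 2048 (alias of n_embd)
#     print(config.num_hidden_layers)  # 24 (alias of n_layer)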
| 7 | 0 |
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 720 |
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 7 | 0 |
'''simple docstring'''
import numpy
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> None:
lowercase_ : Optional[int] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowercase_ : Union[str, Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowercase_ : List[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowercase_ : Tuple = numpy.random.rand(3 , 1 )
# Real output values provided.
lowercase_ : Any = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowercase_ : Union[str, Any] = numpy.zeros(output_array.shape )
def lowerCamelCase__ ( self ) -> numpy.ndarray:
lowercase_ : List[str] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowercase_ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowercase_ : str = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def lowerCamelCase__ ( self ) -> None:
lowercase_ : Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowercase_ : Optional[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowercase_ : Tuple = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> None:
for iteration in range(1 , iterations + 1 ):
lowercase_ : Optional[Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowercase_ : str = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"Iteration {iteration} Loss: {loss}" )
def lowerCamelCase__ ( self , _lowercase ) -> int:
lowercase_ : List[str] = input_arr
lowercase_ : Any = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowercase_ : int = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowercase_ : List[str] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _UpperCAmelCase ( a : numpy.ndarray ) -> str:
"""simple docstring"""
return 1 / (1 + numpy.exp(-value ))
def _UpperCAmelCase ( a : numpy.ndarray ) -> List[str]:
"""simple docstring"""
return (value) * (1 - (value))
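# A minimal numerical sanity check for the two helpers above (illustrative,
# not part of the original module): for s = sigmoid(x), the derivative is
# s * (1 - s), so sigmoid_derivative expects the already activated value s
# rather than the raw input x.
#   s = 1 / (1 + numpy.exp(-0.0))   # sigmoid(0) == 0.5
#   s * (1 - s)                     # == 0.25, the slope of sigmoid at x = 0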
def _UpperCAmelCase ( ) -> Any:
"""simple docstring"""
lowercase_ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowercase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowercase_ : Union[str, Any] = TwoHiddenLayerNeuralNetwork(
input_array=a , output_array=a )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a , iterations=1_0 , give_loss=a )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 721 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(a , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : List[Any] = f.readlines()
lowercase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
lowercase_ : Dict = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
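# Illustrative shape of the file this script edits (assumed content; the real
# custom.js may differ in details):
#   const stableVersion = "v4.30.0"
#   const versionMapping = {
#       "v4.29.0": "v4.29.0",
#       "v4.30.0": "v4.30.0",
#   }
# The loops above rewrite the stableVersion line and append the new mapping
# entry to the line just before the closing "}" of versionMapping.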
if __name__ == "__main__":
A: str = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
A: List[str] = parser.parse_args()
update_custom_js(args.version)
| 7 | 0 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
A: Any = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
A: Union[str, Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
A: Union[str, Any] = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
A: Any = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
A: Union[str, Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase=[1, 10, 100] , _lowercase=4 , _lowercase=3.0 ) -> List[Any]:
if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=_lowercase ) as executor:
lowercase_ : List[Any] = []
lowercase_ : Union[str, Any] = Counter()
lowercase_ : List[Any] = 0
lowercase_ : Any = defaultdict(_lowercase )
for task_id, (candidates, test_case) in enumerate(zip(_lowercase , _lowercase ) ):
for candidate in candidates:
lowercase_ : str = candidate + '\n' + test_case
lowercase_ : Tuple = (test_program, timeout, task_id, completion_id[task_id])
lowercase_ : List[Any] = executor.submit(_lowercase , *_lowercase )
futures.append(_lowercase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_lowercase ):
lowercase_ : Optional[Any] = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
lowercase_ : Optional[Any] = [], []
for result in results.values():
result.sort()
lowercase_ : Dict = [r[1]['passed'] for r in result]
total.append(len(_lowercase ) )
correct.append(sum(_lowercase ) )
lowercase_ : Optional[Any] = np.array(_lowercase )
lowercase_ : List[Any] = np.array(_lowercase )
lowercase_ : List[str] = k
lowercase_ : Dict = {f"pass@{k}": estimate_pass_at_k(_lowercase , _lowercase , _lowercase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCAmelCase ( a : Union[str, Any] , a : Union[str, Any] , a : Any ) -> Any:
"""simple docstring"""
def estimator(a : int , a : int , a : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(a , a ):
lowercase_ : Optional[Any] = itertools.repeat(a , len(a ) )
else:
assert len(a ) == len(a )
lowercase_ : Tuple = iter(a )
return np.array([estimator(int(a ) , int(a ) , a ) for n, c in zip(a , a )] )
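# A self-contained sketch of the unbiased pass@k estimator used above
# (illustrative; not part of the original metric):
#   pass@k = 1 - C(n - c, k) / C(n, k)
def _pass_at_k_sketch(n: int, c: int, k: int) -> float:
    # computed stably as a running product over 1 - k / i, i = n-c+1 .. n
    if n - c < k:
        return 1.0
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))
# Worked example: n=5 samples, c=2 correct, k=2 ->
#   _pass_at_k_sketch(5, 2, 2) == 1 - C(3, 2) / C(5, 2) = 1 - 3 / 10 = 0.7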
| 700 |
'''simple docstring'''
def _UpperCAmelCase ( a : list[list[float]] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(a ):
if len(a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(a ) )
return data_lists
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for dlist, weight in zip(a , a ):
lowercase_ : Tuple = min(a )
lowercase_ : Any = max(a )
lowercase_ : list[float] = []
        # for weight 0 (lower is better) the score is 1 - the normalized value
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowercase_ : str = f"Invalid weight of {weight:f} provided"
raise ValueError(a )
score_lists.append(a )
return score_lists
def _UpperCAmelCase ( a : list[list[float]] ) -> list[float]:
"""simple docstring"""
lowercase_ : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(a ):
lowercase_ : List[Any] = final_scores[j] + ele
return final_scores
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : int = get_data(a )
lowercase_ : Optional[int] = calculate_each_score(a , a )
lowercase_ : Dict = generate_final_scores(a )
# append scores to source data
for i, ele in enumerate(a ):
source_data[i].append(a )
return source_data
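# A minimal worked example of the pipeline above (illustrative values):
#   source_data = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#   weights     = [0, 0, 1]   # 0 = lower is better, 1 = higher is better
# get_data transposes rows into per-attribute columns, calculate_each_score
# min-max normalizes each column (inverting it when weight == 0), and
# generate_final_scores sums the per-attribute scores of each row, giving
#   [2.0, 1.0, 1.3333...] appended to the corresponding rows.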
| 7 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( a : list ) -> list:
"""simple docstring"""
lowercase_ : int = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowercase_ : List[str] = True
for i in range(0 , len(a ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
for i in range(1 , len(a ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ : List[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Dict = False
return input_list
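# Illustrative trace on [3, 2, 1]:
#   even pass (indices 0, 2, ...): compare (0, 1) -> [2, 3, 1]
#   odd pass  (indices 1, 3, ...): compare (1, 2) -> [2, 1, 3]
#   next even pass: compare (0, 1) -> [1, 2, 3]
# a final full pass with no swaps leaves is_sorted True and ends the loop.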
if __name__ == "__main__":
print("Enter list to be sorted")
A: Optional[Any] = [int(x) for x in input().split()]
# inputing elements of the list in one line
A: int = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 701 |
'''simple docstring'''
def _UpperCAmelCase ( a : int , a : int ) -> int:
"""simple docstring"""
while second != 0:
lowercase_ : Any = first & second
first ^= second
lowercase_ : List[str] = c << 1
return first
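# Illustrative trace of the carry loop above for add(5, 3):
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000 -> carry=0,    first=0b1000, second=0
# second reaches 0, so the function returns 0b1000 == 8.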
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A: List[str] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : str , a : str ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Dict = b.T
lowercase_ : str = np.sum(np.square(a ) , axis=1 )
lowercase_ : Optional[Any] = np.sum(np.square(a ) , axis=0 )
lowercase_ : Optional[int] = np.matmul(a , a )
lowercase_ : Any = aa[:, None] - 2 * ab + ba[None, :]
return d
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : List[Any] = x.reshape(-1 , 3 )
lowercase_ : Union[str, Any] = squared_euclidean_distance(a , a )
return np.argmin(a , axis=1 )
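# A small self-contained check of the identity used above (illustrative):
# squared_euclidean_distance relies on ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b,
# so all pairwise pixel-to-cluster distances reduce to a single matmul, and
# argmin over axis=1 picks the closest palette entry per pixel.
def _nearest_cluster_sketch() -> int:
    pixel = np.array([[10.0, 0.0, 0.0]])  # one RGB pixel
    clusters = np.array([[0.0, 0.0, 0.0], [9.0, 0.0, 0.0]])
    d = (
        np.sum(pixel**2, axis=1)[:, None]
        - 2 * pixel @ clusters.T
        + np.sum(clusters**2, axis=1)[None, :]
    )  # [[100.0, 1.0]]
    return int(np.argmin(d, axis=1)[0])  # -> 1, the closer cluster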
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ['pixel_values']
def __init__( self , _lowercase = None , _lowercase = True , _lowercase = None , _lowercase = PILImageResampling.BILINEAR , _lowercase = True , _lowercase = True , **_lowercase , ) -> None:
super().__init__(**_lowercase )
lowercase_ : Optional[Any] = size if size is not None else {'height': 256, 'width': 256}
lowercase_ : Optional[Any] = get_size_dict(_lowercase )
lowercase_ : List[str] = np.array(_lowercase ) if clusters is not None else None
lowercase_ : Dict = do_resize
lowercase_ : Dict = size
lowercase_ : Optional[int] = resample
lowercase_ : Dict = do_normalize
lowercase_ : Tuple = do_color_quantize
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase = PILImageResampling.BILINEAR , _lowercase = None , **_lowercase , ) -> np.ndarray:
lowercase_ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}" )
return resize(
_lowercase , size=(size['height'], size['width']) , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase = None , ) -> np.ndarray:
lowercase_ : Optional[Any] = rescale(image=_lowercase , scale=1 / 127.5 , data_format=_lowercase )
lowercase_ : Optional[Any] = image - 1
return image
def lowerCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> PIL.Image.Image:
lowercase_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowercase_ : Optional[Any] = size if size is not None else self.size
lowercase_ : Any = get_size_dict(_lowercase )
lowercase_ : Any = resample if resample is not None else self.resample
lowercase_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
lowercase_ : Union[str, Any] = clusters if clusters is not None else self.clusters
lowercase_ : Dict = np.array(_lowercase )
lowercase_ : str = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
lowercase_ : Tuple = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
lowercase_ : Dict = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_normalize:
lowercase_ : str = [self.normalize(image=_lowercase ) for image in images]
if do_color_quantize:
lowercase_ : Tuple = [to_channel_dimension_format(_lowercase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
lowercase_ : List[Any] = np.array(_lowercase )
lowercase_ : Optional[int] = color_quantize(_lowercase , _lowercase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
lowercase_ : Optional[int] = images.shape[0]
lowercase_ : Optional[Any] = images.reshape(_lowercase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
lowercase_ : List[str] = list(_lowercase )
else:
lowercase_ : List[str] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
lowercase_ : int = {'input_ids': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
| 702 |
'''simple docstring'''
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
lowercase_ : Dict = n
lowercase_ : Dict = [None] * self.n
lowercase_ : Tuple = 0 # index of the first element
lowercase_ : List[Any] = 0
lowercase_ : List[Any] = 0
def __len__( self ) -> int:
return self.size
def lowerCamelCase__ ( self ) -> bool:
return self.size == 0
def lowerCamelCase__ ( self ) -> List[Any]:
return False if self.is_empty() else self.array[self.front]
def lowerCamelCase__ ( self , _lowercase ) -> Any:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
lowercase_ : Tuple = data
lowercase_ : List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def lowerCamelCase__ ( self ) -> Any:
if self.size == 0:
raise Exception('UNDERFLOW' )
lowercase_ : Dict = self.array[self.front]
lowercase_ : Tuple = None
lowercase_ : int = (self.front + 1) % self.n
self.size -= 1
return temp
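# Illustrative usage of the ring buffer above (class name is generated here;
# assume it is constructed with capacity n=3):
#   q = <queue class>(3)
#   q.enqueue(1).enqueue(2)   # size 2, front stays at index 0, rear moves to 2
#   q.dequeue()               # returns 1; front advances to (0 + 1) % 3
# front and rear wrap modulo n, so freed slots are reused without shifting.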
| 7 | 0 |
'''simple docstring'''
from math import factorial, pi
def _UpperCAmelCase ( a : float , a : int = 3_0 ) -> float:
"""simple docstring"""
if not isinstance(a , (int, float) ):
raise ValueError('maclaurin_sin() requires either an int or float for theta' )
if not isinstance(a , a ) or accuracy <= 0:
raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
lowercase_ : Dict = float(a )
lowercase_ : Dict = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(a ) )
def _UpperCAmelCase ( a : float , a : int = 3_0 ) -> float:
"""simple docstring"""
if not isinstance(a , (int, float) ):
raise ValueError('maclaurin_cos() requires either an int or float for theta' )
if not isinstance(a , a ) or accuracy <= 0:
raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
lowercase_ : int = float(a )
lowercase_ : int = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(a ) )
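# Series used above, for reference:
#   sin(theta) = sum_{r>=0} (-1)^r * theta^(2r+1) / (2r+1)!
#   cos(theta) = sum_{r>=0} (-1)^r * theta^(2r)   / (2r)!
# theta is first reduced modulo 2*pi so the truncated series stays accurate
# for large inputs.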
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
| 703 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def _UpperCAmelCase ( a : Tuple , a : Union[str, Any] , a : List[Any]=8 ) -> Dict:
"""simple docstring"""
lowercase_ : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
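# e.g. height=768 with scale_factor=8: ceil(768 / 64) = 12, so the function
# returns 12 * 8 = 96, the latent height used for a 768-pixel image.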
def _UpperCAmelCase ( a : Any , a : Dict=5_1_2 , a : Optional[Any]=5_1_2 ) -> Tuple:
"""simple docstring"""
lowercase_ : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : int = np.array(pil_image.convert('RGB' ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 1_27.5 - 1
lowercase_ : Any = np.transpose(a , [2, 0, 1] )
lowercase_ : Any = torch.from_numpy(a ).unsqueeze(0 )
return image
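# The resize/normalize helper above maps a PIL image to a channels-first
# float tensor in [-1, 1] (x / 127.5 - 1 sends [0, 255] to [-1, 1]), the
# range the VQ encoder expects.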
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
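    # e.g. num_inference_steps=100 and strength=0.3 give init_timestep=30,
    # so t_start=70 and only the final 30 scheduler steps are run on the
    # noised init latents.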
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 7 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( a : list[list[float]] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(a ):
if len(a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(a ) )
return data_lists
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for dlist, weight in zip(a , a ):
lowercase_ : Tuple = min(a )
lowercase_ : Any = max(a )
lowercase_ : list[float] = []
        # for weight 0 (lower is better) the score is 1 - the normalized value
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowercase_ : str = f"Invalid weight of {weight:f} provided"
raise ValueError(a )
score_lists.append(a )
return score_lists
def _UpperCAmelCase ( a : list[list[float]] ) -> list[float]:
"""simple docstring"""
lowercase_ : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(a ):
lowercase_ : List[Any] = final_scores[j] + ele
return final_scores
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : int = get_data(a )
lowercase_ : Optional[int] = calculate_each_score(a , a )
lowercase_ : Dict = generate_final_scores(a )
# append scores to source data
for i, ele in enumerate(a ):
source_data[i].append(a )
return source_data
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A: int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A: Dict = logging.get_logger(__name__)
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 'maskformer-swin'
SCREAMING_SNAKE_CASE_ : Dict = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , _lowercase=224 , _lowercase=4 , _lowercase=3 , _lowercase=96 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 12, 24] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=None , _lowercase=None , **_lowercase , ) -> str:
super().__init__(**_lowercase )
lowercase_ : List[str] = image_size
lowercase_ : Dict = patch_size
lowercase_ : Union[str, Any] = num_channels
lowercase_ : Tuple = embed_dim
lowercase_ : int = depths
lowercase_ : str = len(_lowercase )
lowercase_ : Optional[Any] = num_heads
lowercase_ : Any = window_size
lowercase_ : int = mlp_ratio
lowercase_ : Dict = qkv_bias
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Any = attention_probs_dropout_prob
lowercase_ : List[str] = drop_path_rate
lowercase_ : Any = hidden_act
lowercase_ : Union[str, Any] = use_absolute_embeddings
lowercase_ : Tuple = layer_norm_eps
lowercase_ : str = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase_ : Optional[Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
lowercase_ : Tuple = ['stem'] + [f"stage{idx}" for idx in range(1 , len(_lowercase ) + 1 )]
lowercase_ : List[str] = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
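# e.g. the default embed_dim=96 with depths=[2, 2, 6, 2] (4 stages) gives
# hidden_size = 96 * 2**3 = 768, the channel width after the last Swin stage.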
| 705 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A: Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
A: List[Any] = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase ) -> Tuple:
return FSMTTokenizer.from_pretrained(_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
lowercase_ : str = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(_lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
        # note: this test does not measure peak performance since it only
        # evaluates a small batch, but it should be enough to detect a
        # regression in output quality
lowercase_ : Optional[Any] = f"facebook/wmt19-{pair}"
lowercase_ : str = self.get_tokenizer(_lowercase )
lowercase_ : Any = self.get_model(_lowercase )
lowercase_ : Any = bleu_data[pair]['src']
lowercase_ : Any = bleu_data[pair]['tgt']
lowercase_ : Dict = tokenizer(_lowercase , return_tensors='pt' , truncation=_lowercase , padding='longest' ).to(_lowercase )
lowercase_ : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowercase_ : Any = tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
lowercase_ : Union[str, Any] = calculate_bleu(_lowercase , _lowercase )
print(_lowercase )
self.assertGreaterEqual(scores['bleu'] , _lowercase )
| 7 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Union[str, Any] = "▁"
A: Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
A: str = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
A: Optional[Any] = {
"facebook/nllb-200-distilled-600M": 1_0_2_4,
}
# fmt: off
A: Dict = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __magic_name__ ( UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE_ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE_ : List[int] = []
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self , _lowercase , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase = None , _lowercase=None , _lowercase=False , **_lowercase , ) -> Optional[Any]:
        # Mask token behaves like a normal word, i.e. includes the space before it
lowercase_ : Tuple = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
lowercase_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase_ : Union[str, Any] = legacy_behaviour
super().__init__(
bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , tokenizer_file=_lowercase , src_lang=_lowercase , tgt_lang=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_lowercase , **_lowercase , )
lowercase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowercase ) )
lowercase_ : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase_ : List[str] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : List[str] = 1
lowercase_ : Optional[int] = len(self.sp_model )
lowercase_ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowercase )
}
lowercase_ : Tuple = {v: k for k, v in self.lang_code_to_id.items()}
lowercase_ : Optional[int] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase_ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ : Optional[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase_ : List[str] = src_lang if src_lang is not None else 'eng_Latn'
lowercase_ : Union[str, Any] = self.lang_code_to_id[self._src_lang]
lowercase_ : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Tuple:
lowercase_ : Dict = self.__dict__.copy()
lowercase_ : Tuple = None
lowercase_ : List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _lowercase ) -> int:
lowercase_ : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase_ : str = {}
lowercase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCamelCase__ ( self ) -> Optional[int]:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase__ ( self ) -> str:
return self._src_lang
@src_lang.setter
def lowerCamelCase__ ( self , _lowercase ) -> None:
lowercase_ : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
lowercase_ : Union[str, Any] = [1] * len(self.prefix_tokens )
lowercase_ : Union[str, Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowercase )) + suffix_ones
return prefix_ones + ([0] * len(_lowercase )) + ([0] * len(_lowercase )) + suffix_ones
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
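    # e.g. with the default (non-legacy) behaviour and src_lang="eng_Latn", a
    # single sequence is encoded as [eng_Latn] token_ids </s>; legacy
    # behaviour instead places the language code after </s>.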
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
lowercase_ : Any = [self.sep_token_id]
lowercase_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , **_lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowercase_ : Dict = src_lang
lowercase_ : str = self(_lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , **_lowercase )
lowercase_ : str = self.convert_tokens_to_ids(_lowercase )
lowercase_ : Any = tgt_lang_id
return inputs
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Dict = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self , _lowercase ) -> List[str]:
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : str = self.sp_model.PieceToId(_lowercase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__ ( self , _lowercase ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__ ( self , _lowercase ) -> List[str]:
lowercase_ : Union[str, Any] = ''.join(_lowercase ).replace(_lowercase , ' ' ).strip()
return out_string
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase_ : Any = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase , 'wb' ) as fi:
lowercase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (out_vocab_file,)
def lowerCamelCase__ ( self , _lowercase , _lowercase = "eng_Latn" , _lowercase = None , _lowercase = "fra_Latn" , **_lowercase , ) -> BatchEncoding:
lowercase_ : Optional[int] = src_lang
lowercase_ : int = tgt_lang
return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase )
def lowerCamelCase__ ( self ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self , _lowercase ) -> None:
lowercase_ : Optional[Any] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowercase_ : Any = []
lowercase_ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
lowercase_ : str = [self.cur_lang_code]
lowercase_ : Union[str, Any] = [self.eos_token_id]
def lowerCamelCase__ ( self , _lowercase ) -> None:
lowercase_ : str = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowercase_ : List[Any] = []
lowercase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
lowercase_ : int = [self.cur_lang_code]
lowercase_ : Union[str, Any] = [self.eos_token_id]
| 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: int = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
A: int = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
A: str = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
A: Tuple = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="auto" , _lowercase=-1 , _lowercase=0.9 , _lowercase=5 , _lowercase=500 , _lowercase="gpt2-large" , _lowercase=-1 , _lowercase=1024 , _lowercase=25 , _lowercase=5 , _lowercase=True , _lowercase=25 , ) -> str:
lowercase_ : Dict = compute_mauve(
p_text=_lowercase , q_text=_lowercase , p_features=_lowercase , q_features=_lowercase , p_tokens=_lowercase , q_tokens=_lowercase , num_buckets=_lowercase , pca_max_data=_lowercase , kmeans_explained_var=_lowercase , kmeans_num_redo=_lowercase , kmeans_max_iter=_lowercase , featurize_model_name=_lowercase , device_id=_lowercase , max_text_length=_lowercase , divergence_curve_discretization_size=_lowercase , mauve_scaling_factor=_lowercase , verbose=_lowercase , seed=_lowercase , )
return out
| 707 |
'''simple docstring'''
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
lowercase_ : Dict = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
lowercase_ : Dict = ''
lowercase_ : Any = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the furthest-ending palindromic
    # substring found so far
lowercase_ , lowercase_ : Dict = 0, 0
    # length[i] stores the length of the palindromic substring centered at i
lowercase_ : List[Any] = [1 for i in range(len(a ) )]
    # for each character in new_input_string, find the corresponding palindromic substring
lowercase_ : Dict = 0
for j in range(len(a ) ):
lowercase_ : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase_ : int = 2 * k - 1
            # does this palindrome end after the previously explored end (that is, r)?
            # if yes, update l and r to the bounds of this palindrome
if j + k - 1 > r:
lowercase_ : Tuple = j - k + 1 # noqa: E741
lowercase_ : Tuple = j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase_ : Tuple = length[j]
lowercase_ : List[Any] = j
# create that string
lowercase_ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
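# Worked illustration (not from the original file): for input "aba" the
# preprocessed string is "a|b|a"; the center at index 2 ('b') expands to k = 3,
# giving length[2] = 2 * 3 - 1 = 5, and stripping '|' from "a|b|a" yields "aba".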
| 7 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE_ : str = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : str = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE_ : Tuple = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def snake_case__ ( self ) -> List[str]:
torch.manual_seed(0 )
lowercase_ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
lowercase_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
lowercase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
lowercase_ : Dict = CLIPTextModel(_lowercase )
lowercase_ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase_ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case__ ( self , _lowercase , _lowercase=0 ) -> int:
if str(_lowercase ).startswith('mps' ):
lowercase_ : Tuple = torch.manual_seed(_lowercase )
else:
lowercase_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
lowercase_ : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case__ ( self ) -> Optional[Any]:
lowercase_ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase_ : List[str] = self.get_dummy_components()
lowercase_ : int = TextToVideoSDPipeline(**_lowercase )
lowercase_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
lowercase_ : Optional[Any] = self.get_dummy_inputs(_lowercase )
lowercase_ : Any = 'np'
lowercase_ : Union[str, Any] = sd_pipe(**_lowercase ).frames
lowercase_ : int = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowercase_ : List[str] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case__ ( self ) -> Optional[int]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_lowercase , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def snake_case__ ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_lowercase , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case__ ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case__ ( self ) -> Optional[int]:
pass
def snake_case__ ( self ) -> Union[str, Any]:
return super().test_progress_bar()
@slow
@skip_mps
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ) -> int:
lowercase_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
lowercase_ : List[str] = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
lowercase_ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase_ : int = pipe.to('cuda' )
lowercase_ : int = 'Spiderman is surfing'
lowercase_ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase_ : int = pipe(_lowercase , generator=_lowercase , num_inference_steps=25 , output_type='pt' ).frames
lowercase_ : Tuple = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def snake_case__ ( self ) -> int:
lowercase_ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
lowercase_ : Union[str, Any] = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
lowercase_ : List[str] = pipe.to('cuda' )
lowercase_ : str = 'Spiderman is surfing'
lowercase_ : Any = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase_ : List[str] = pipe(_lowercase , generator=_lowercase , num_inference_steps=2 , output_type='pt' ).frames
lowercase_ : Any = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 708 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Legacy read-only filesystem over the files of a Hub dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A: Dict = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Any = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: int = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
A: List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure)
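# Note: `_LazyModule` defers the heavy framework imports declared above, so
# importing this package is cheap; torch/flax/tf are only paid for when the
# corresponding model class is first accessed (module path illustrative), e.g.
#   from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderConfig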
| 709 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
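# Example of one mapping emitted by the encoder-layer loop above (for i = 0):
#   ("transformer.blocks.0.norm1.weight", "vilt.encoder.layer.0.layernorm_before.weight")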
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : Optional[int] = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = in_proj_bias[: config.hidden_size]
lowercase_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
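# Note: the fused timm qkv weight has shape (3 * hidden_size, hidden_size); the three
# slices above carve out the query, key and value projections in that order.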
def _UpperCAmelCase ( a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(a , a )
def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = dct.pop(a )
lowercase_ : Dict = val
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 7 | 0 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__a: Any = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
__a: Tuple = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
__a: Optional[int] = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
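# Note: with use_aggregator=True, compute() returns a BootstrapAggregator summary
# (low/mid/high Score tuples) per rouge type; with use_aggregator=False it returns
# the list of per-example Score tuples instead.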
| 710 |
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in ascending order via cocktail shaker sort.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: bubble the smallest remaining element to the left
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # forward pass: bubble the largest remaining element to the right
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
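# Cocktail shaker sort is a bidirectional bubble sort: each outer iteration sweeps
# the smallest remaining element left, then the largest right, so an already-sorted
# input exits after a single pass (O(n) best case, O(n^2) worst case).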
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
A: Tuple = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = (DPMSolverSinglestepScheduler,)
SCREAMING_SNAKE_CASE_ : Any = (('num_inference_steps', 2_5),)
def lowerCamelCase__ ( self , **_lowercase ) -> Tuple:
lowercase_ : Union[str, Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**_lowercase )
return config
def lowerCamelCase__ ( self , _lowercase=0 , **_lowercase ) -> Dict:
lowercase_ : Union[str, Any] = dict(self.forward_default_kwargs )
lowercase_ : List[Any] = kwargs.pop('num_inference_steps' , _lowercase )
lowercase_ : List[str] = self.dummy_sample
lowercase_ : Dict = 0.1 * sample
lowercase_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase_ : Optional[int] = self.get_scheduler_config(**_lowercase )
lowercase_ : List[str] = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
lowercase_ : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
lowercase_ : int = scheduler_class.from_pretrained(_lowercase )
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
lowercase_ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase_ : Any = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1 ):
lowercase_ : Optional[int] = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
lowercase_ : List[Any] = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self ) -> List[str]:
pass
def lowerCamelCase__ ( self , _lowercase=0 , **_lowercase ) -> List[str]:
lowercase_ : int = dict(self.forward_default_kwargs )
lowercase_ : int = kwargs.pop('num_inference_steps' , _lowercase )
lowercase_ : Any = self.dummy_sample
lowercase_ : Optional[Any] = 0.1 * sample
lowercase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase_ : List[str] = self.get_scheduler_config()
lowercase_ : Any = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase_ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
lowercase_ : Any = scheduler_class.from_pretrained(_lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residual (must be after setting timesteps)
lowercase_ : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase_ : Dict = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
lowercase_ : Union[str, Any] = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self , _lowercase=None , **_lowercase ) -> Optional[Any]:
if scheduler is None:
lowercase_ : Union[str, Any] = self.scheduler_classes[0]
lowercase_ : Optional[Any] = self.get_scheduler_config(**_lowercase )
lowercase_ : str = scheduler_class(**_lowercase )
lowercase_ : Optional[Any] = self.scheduler_classes[0]
lowercase_ : Optional[Any] = self.get_scheduler_config(**_lowercase )
lowercase_ : int = scheduler_class(**_lowercase )
lowercase_ : Tuple = 10
lowercase_ : List[Any] = self.dummy_model()
lowercase_ : Any = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ : str = model(_lowercase , _lowercase )
lowercase_ : Dict = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
return sample
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Optional[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase_ : Tuple = 50
lowercase_ : int = self.dummy_model()
lowercase_ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
        # make sure that the first timestep is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
lowercase_ : int = model(_lowercase , _lowercase )
lowercase_ : Any = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
lowercase_ : Any = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.25_74 ) < 1E-3
def lowerCamelCase__ ( self ) -> Dict:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase_ : str = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase_ : List[str] = self.full_loop(scheduler=_lowercase )
lowercase_ : Tuple = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
lowercase_ : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config )
lowercase_ : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase_ : int = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase_ : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase_ : Any = self.full_loop(scheduler=_lowercase )
lowercase_ : Tuple = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
def lowerCamelCase__ ( self ) -> Dict:
self.check_over_configs(thresholding=_lowercase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='dpmsolver++' , solver_order=_lowercase , solver_type=_lowercase , )
def lowerCamelCase__ ( self ) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def lowerCamelCase__ ( self ) -> Dict:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
lowercase_ : Union[str, Any] = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase ).any(), "Samples have nan numbers"
def lowerCamelCase__ ( self ) -> int:
self.check_over_configs(lower_order_final=_lowercase )
self.check_over_configs(lower_order_final=_lowercase )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowerCamelCase__ ( self ) -> Tuple:
self.check_over_configs(variance_type=_lowercase )
self.check_over_configs(variance_type='learned_range' )
def lowerCamelCase__ ( self ) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0 )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Tuple = self.full_loop()
lowercase_ : int = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Optional[Any] = self.full_loop(use_karras_sigmas=_lowercase )
lowercase_ : int = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.22_48 ) < 1E-3
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Union[str, Any] = self.full_loop(prediction_type='v_prediction' )
lowercase_ : str = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.14_53 ) < 1E-3
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=_lowercase )
lowercase_ : List[str] = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.06_49 ) < 1E-3
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[int] = self.scheduler_classes[0]
lowercase_ : List[str] = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0 )
lowercase_ : Optional[int] = scheduler_class(**_lowercase )
lowercase_ : str = 10
lowercase_ : List[str] = self.dummy_model()
lowercase_ : int = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ : Optional[int] = model(_lowercase , _lowercase )
lowercase_ : int = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
assert sample.dtype == torch.floataa
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 7 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Create the vector that points from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Cross product of two 3D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check whether a vector is the zero vector up to `accuracy` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff AB x AC is (numerically) the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
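# Illustrative driver (not part of the original snippet; sample points are made up):
if __name__ == "__main__":
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True: points lie on one line
    print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # False: points span a plane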
| 712 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings (1.0 means identical).

    >>> round(jaro_winkler("martha", "marhta"), 4)
    0.9611
    >>> jaro_winkler("hello", "hello")
    1.0
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):  # noqa: E741
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
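# Worked example: "martha" vs "marhta" has 6 matching characters and 1 transposition,
# so jaro = (6/6 + 6/6 + 5/6) / 3 = 17/18 ~ 0.9444; with common prefix "mar" (length 3),
# jaro_winkler = 17/18 + 0.1 * 3 * (1 - 17/18) ~ 0.9611.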
| 7 | 0 |
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test for the Mersenne number 2**p - 1.
    `p` itself should be prime for the result to be meaningful.

    >>> lucas_lehmer_test(7)
    True
    >>> lucas_lehmer_test(11)
    False
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
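# Background: the test iterates s_{i+1} = (s_i**2 - 2) mod M_p from s_0 = 4, where
# M_p = 2**p - 1; M_p is prime iff s_{p-2} == 0. Above, M_7 = 127 is prime (True)
# while M_11 = 2047 = 23 * 89 is composite (False).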
| 713 |
'''simple docstring'''
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """
    Build a row_size x row_size matrix filled with 1..row_size**2.

    >>> make_matrix()
    [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 270 degrees counterclockwise (90 degrees clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 7 | 0 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
A: Dict = "src/diffusers"
A: Any = "."
# This is to make sure the diffusers module imported is the one in the repo.
A: Dict = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
A: Any = spec.loader.load_module()
def _UpperCAmelCase ( a : str , a : Any ) -> Tuple:
"""simple docstring"""
return line.startswith(a ) or len(a ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , a ) is not None
def _UpperCAmelCase ( a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Tuple = object_name.split('.' )
lowercase_ : str = 0
# First let's find the module where our object lives.
lowercase_ : Any = parts[i]
while i < len(a ) and not os.path.isfile(os.path.join(a , f"{module}.py" ) ):
i += 1
if i < len(a ):
lowercase_ : Union[str, Any] = os.path.join(a , parts[i] )
if i >= len(a ):
raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
with open(os.path.join(a , f"{module}.py" ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : int = f.readlines()
# Now let's find the class / func in the code!
lowercase_ : Optional[int] = ''
lowercase_ : Any = 0
for name in parts[i + 1 :]:
while (
line_index < len(a ) and re.search(Rf"^{indent}(class|def)\s+{name}(\(|\:)" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(a ):
raise ValueError(f" {object_name} does not match any function or class in {module}." )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowercase_ : str = line_index
while line_index < len(a ) and _should_continue(lines[line_index] , a ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowercase_ : Union[str, Any] = lines[start_index:line_index]
return "".join(a )
A: List[str] = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
A: Optional[int] = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
A: Dict = re.compile(r"<FILL\s+[^>]*>")
def _UpperCAmelCase ( a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase_ : str = code.split('\n' )
lowercase_ : Optional[int] = 0
while idx < len(a ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(a ):
return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def _UpperCAmelCase ( a : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : str = len(get_indent(a ) ) > 0
if has_indent:
lowercase_ : List[str] = f"class Bla:\n{code}"
lowercase_ : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=a )
lowercase_ : List[str] = black.format_str(a , mode=a )
lowercase_ : Any = style_docstrings_in_code(a )
return result[len('class Bla:\n' ) :] if has_indent else result
def _UpperCAmelCase ( a : Optional[Any] , a : int=False ) -> Any:
"""simple docstring"""
with open(a , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : str = f.readlines()
lowercase_ : int = []
lowercase_ : Optional[Any] = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(a ):
lowercase_ : Optional[int] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
lowercase_ : Any = search.groups()
lowercase_ : int = find_code_in_diffusers(a )
lowercase_ : Optional[int] = get_indent(a )
lowercase_ : Any = line_index + 1 if indent == theoretical_indent else line_index + 2
lowercase_ : List[Any] = theoretical_indent
lowercase_ : Optional[Any] = start_index
        # Loop to check the observed code; stop when the indentation diminishes or when we see an `# End copy` comment.
lowercase_ : Tuple = True
while line_index < len(a ) and should_continue:
line_index += 1
if line_index >= len(a ):
break
lowercase_ : Any = lines[line_index]
lowercase_ : Tuple = _should_continue(a , a ) and re.search(f"^{indent}# End copy" , a ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowercase_ : Any = lines[start_index:line_index]
lowercase_ : Any = ''.join(a )
# Remove any nested `Copied from` comments to avoid circular copies
lowercase_ : str = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(a ) is None]
lowercase_ : str = '\n'.join(a )
# Before comparing, use the `replace_pattern` on the original code.
if len(a ) > 0:
lowercase_ : Optional[Any] = replace_pattern.replace('with' , '' ).split(',' )
lowercase_ : int = [_re_replace_pattern.search(a ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
lowercase_ : str = pattern.groups()
lowercase_ : Dict = re.sub(a , a , a )
if option.strip() == "all-casing":
lowercase_ : str = re.sub(obja.lower() , obja.lower() , a )
lowercase_ : List[Any] = re.sub(obja.upper() , obja.upper() , a )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
lowercase_ : Any = blackify(lines[start_index - 1] + theoretical_code )
lowercase_ : int = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
lowercase_ : Tuple = lines[:start_index] + [theoretical_code] + lines[line_index:]
lowercase_ : List[Any] = start_index + 1
if overwrite and len(a ) > 0:
# Warn the user a file has been modified.
print(f"Detected changes, rewriting {filename}." )
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
return diffs
def _UpperCAmelCase ( a : bool = False ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = glob.glob(os.path.join(a , '**/*.py' ) , recursive=a )
lowercase_ : List[str] = []
for filename in all_files:
lowercase_ : Tuple = is_copy_consistent(a , a )
diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(a ) > 0:
lowercase_ : Tuple = '\n'.join(a )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
A: Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
A: str = parser.parse_args()
check_copies(args.fix_and_overwrite) | 714 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _UpperCAmelCase ( a : Dict , a : Optional[int] , a : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Any = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
lowercase_ : List[str] = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
lowercase_ : Optional[Any] = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(a , exist_ok=a )
lowercase_ : int = os.path.join(a , 'README.md' )
print(f"Generating {path}" )
with open(a , 'w' , encoding='utf-8' ) as f:
f.write(a )
# make sure we are under the root of the project
A: List[str] = Path(__file__).resolve().parent.parent.parent
A: List[str] = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A , A , A: Any = model_name.split("-")
A: int = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
A: List[Any] = None
A: Dict = logging.get_logger(__name__)
A: Dict = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
A: List[str] = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
A: Optional[Any] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
A: str = "▁"
# Segments (not really needed)
A: str = 0
A: Any = 1
A: Union[str, Any] = 2
A: str = 3
A: List[str] = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer backed by the HuggingFace *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
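    # Note the XLNet convention above: unlike BERT, the classifier token comes *last*
    # (tokens + <sep> + <cls>), and the <cls> position gets segment id 2 rather than 0.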
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 715 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
def _UpperCAmelCase ( a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[str] = git.Repo(search_parent_directories=a )
lowercase_ : Union[str, Any] = {
'repo_id': str(a ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(a , 'git_log.json' ) , 'w' ) as f:
json.dump(a , a , indent=4 )
def _UpperCAmelCase ( a : str ) -> Union[str, Any]:
"""simple docstring"""
if params.n_gpu <= 0:
lowercase_ : int = 0
lowercase_ : Union[str, Any] = -1
lowercase_ : List[str] = True
lowercase_ : Optional[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase_ : Dict = int(os.environ['WORLD_SIZE'] )
lowercase_ : Union[str, Any] = int(os.environ['N_GPU_NODE'] )
lowercase_ : Optional[int] = int(os.environ['RANK'] )
# number of nodes / node ID
lowercase_ : int = params.world_size // params.n_gpu_per_node
lowercase_ : str = params.global_rank // params.n_gpu_per_node
lowercase_ : Dict = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase_ : str = 1
lowercase_ : Dict = 0
lowercase_ : Tuple = 0
lowercase_ : List[Any] = 0
lowercase_ : int = 1
lowercase_ : Tuple = 1
lowercase_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase_ : List[str] = params.node_id == 0 and params.local_rank == 0
lowercase_ : Optional[Any] = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _UpperCAmelCase ( a : Dict ) -> Optional[int]:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
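# Hedged sketch (illustrative only): the node / rank arithmetic the GPU
# initialisation helper above derives from WORLD_SIZE, N_GPU_NODE and RANK,
# checked here on a toy 2-node x 4-GPU layout.
def _rank_arithmetic_demo(world_size=8, n_gpu_per_node=4):
    n_nodes = world_size // n_gpu_per_node
    for global_rank in range(world_size):
        node_id = global_rank // n_gpu_per_node
        local_rank = global_rank % n_gpu_per_node
        assert 0 <= local_rank <= global_rank < world_size
        assert 0 <= node_id < n_nodes
        assert global_rank == node_id * n_gpu_per_node + local_rank

_rank_arithmetic_demo()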
| 7 | 0 |
'''simple docstring'''
A: List[Any] = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A: Optional[int] = [{"type": "code", "content": INSTALL_CONTENT}]
A: Dict = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 716 |
'''simple docstring'''
import os
from distutils.util import strtobool
def _UpperCAmelCase ( a : Any , a : int ) -> Any:
"""simple docstring"""
for e in env_keys:
lowercase_ : Optional[Any] = int(os.environ.get(a , -1 ) )
if val >= 0:
return val
return default
def _UpperCAmelCase ( a : List[Any] , a : Dict=False ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Optional[int] = os.environ.get(a , str(a ) )
return strtobool(a ) == 1 # As its name indicates `strtobool` actually returns an int...
def _UpperCAmelCase ( a : List[Any] , a : Dict="no" ) -> str:
"""simple docstring"""
lowercase_ : List[Any] = os.environ.get(a , str(a ) )
return value
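# Hedged sketch (self-contained, since the obfuscated helpers above all share
# one name and are not callable as written): the flag lookup they correspond
# to. The function name below is illustrative, not the library's.
def parse_flag_from_env(key, default=False):
    # "1"/"yes"/"true" -> 1, "0"/"no"/"false" -> 0, anything else raises
    return strtobool(os.environ.get(key, str(default))) == 1

os.environ["DEMO_FLAG"] = "yes"
assert parse_flag_from_env("DEMO_FLAG") is True
assert parse_flag_from_env("UNSET_DEMO_FLAG") is False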
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: Optional[int] = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
A: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : Dict , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> str:
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
lowercase_ : Union[str, Any] = new_id
# turn into Numpy arrays
lowercase_ : List[Any] = np.array(a )
lowercase_ : Optional[Any] = np.array(a )
if reduce_labels:
lowercase_ : Any = 2_5_5
lowercase_ : Dict = label - 1
lowercase_ : List[Any] = 2_5_5
lowercase_ : Any = label != ignore_index
lowercase_ : List[Any] = np.not_equal(a , a )
lowercase_ : Optional[int] = pred_label[mask]
lowercase_ : Union[str, Any] = np.array(a )[mask]
lowercase_ : Optional[int] = pred_label[pred_label == label]
lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Dict = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Optional[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def _UpperCAmelCase ( a : int , a : Optional[Any] , a : Optional[int] , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Dict:
"""simple docstring"""
lowercase_ : Dict = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(a , a ):
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = intersect_and_union(
a , a , a , a , a , a )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _UpperCAmelCase ( a : Optional[Any] , a : List[str] , a : Optional[Any] , a : bool , a : Optional[int] = None , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Optional[int]:
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = total_intersect_and_union(
a , a , a , a , a , a )
# compute metrics
lowercase_ : str = {}
lowercase_ : str = total_area_intersect.sum() / total_area_label.sum()
lowercase_ : Optional[Any] = total_area_intersect / total_area_union
lowercase_ : List[Any] = total_area_intersect / total_area_label
lowercase_ : Any = np.nanmean(a )
lowercase_ : Optional[Any] = np.nanmean(a )
lowercase_ : int = all_acc
lowercase_ : Union[str, Any] = iou
lowercase_ : Optional[Any] = acc
if nan_to_num is not None:
lowercase_ : Optional[int] = {metric: np.nan_to_num(a , nan=a ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , ) -> Tuple:
lowercase_ : Optional[int] = mean_iou(
results=_lowercase , gt_seg_maps=_lowercase , num_labels=_lowercase , ignore_index=_lowercase , nan_to_num=_lowercase , label_map=_lowercase , reduce_labels=_lowercase , )
return iou_result
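# Hedged sketch (self-contained NumPy illustration of the computation above):
# per-class IoU is intersection / union of the histogram areas produced by
# intersect_and_union, and mean IoU is their nanmean.
def _tiny_mean_iou(pred, gt, num_labels):
    inter = np.histogram(pred[pred == gt], bins=num_labels, range=(0, num_labels - 1))[0]
    area_p = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]
    area_g = np.histogram(gt, bins=num_labels, range=(0, num_labels - 1))[0]
    union = area_p + area_g - inter
    return np.nanmean(inter / union)

_p = np.array([0, 0, 1, 1])
_g = np.array([0, 1, 1, 1])
# class 0: inter 1 / union 2; class 1: inter 2 / union 3
assert abs(_tiny_mean_iou(_p, _g, 2) - (0.5 + 2 / 3) / 2) < 1e-9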
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A: int = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[Any] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Tuple = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
A: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit'
def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=16 , **_lowercase , ) -> List[str]:
super().__init__(**_lowercase )
lowercase_ : Optional[int] = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Any = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Any = image_size
lowercase_ : Tuple = patch_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : str = qkv_bias
lowercase_ : List[str] = encoder_stride
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' )
@property
def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__ ( self ) -> float:
return 1E-4
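# Hedged sketch (illustrative only): the patch-embedding arithmetic the config
# defaults above imply. With image_size=224 and patch_size=16 a ViT encoder
# sees (224 // 16) ** 2 = 196 patch tokens plus one [CLS] token.
def _vit_sequence_length(image_size=224, patch_size=16):
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token

assert _vit_sequence_length() == 197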
| 7 | 0 |
import os
from pathlib import Path
def _UpperCAmelCase ( a : Dict , a : Optional[int] , a : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Any = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
lowercase_ : List[str] = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
lowercase_ : Optional[Any] = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(a , exist_ok=a )
lowercase_ : int = os.path.join(a , 'README.md' )
print(f"Generating {path}" )
with open(a , 'w' , encoding='utf-8' ) as f:
f.write(a )
# make sure we are under the root of the project
A: List[str] = Path(__file__).resolve().parent.parent.parent
A: List[str] = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A: Any = model_name.split("-")
A: int = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Any = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowercase=5_0257 , _lowercase=1024 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=None , _lowercase="gelu_pytorch_tanh" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=5_0256 , _lowercase=5_0256 , _lowercase=True , _lowercase=True , _lowercase=True , **_lowercase , ) -> Any:
lowercase_ : Tuple = vocab_size
lowercase_ : str = n_positions
lowercase_ : List[str] = n_embd
lowercase_ : str = n_layer
lowercase_ : Optional[Any] = n_head
lowercase_ : Optional[int] = n_inner
lowercase_ : Union[str, Any] = activation_function
lowercase_ : Dict = resid_pdrop
lowercase_ : str = embd_pdrop
lowercase_ : Optional[Any] = attn_pdrop
lowercase_ : List[Any] = layer_norm_epsilon
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[Any] = scale_attn_weights
lowercase_ : Any = use_cache
lowercase_ : List[str] = attention_softmax_in_fpaa
lowercase_ : Any = scale_attention_softmax_in_fpaa
lowercase_ : Optional[Any] = multi_query
lowercase_ : Optional[Any] = bos_token_id
lowercase_ : Optional[Any] = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
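# Hedged sketch (self-contained, simplified): how the attribute_map above is
# resolved. The real PretrainedConfig overrides __getattribute__/__setattr__;
# this stand-in only shows the read path, redirecting the canonical name
# `hidden_size` to the model-specific `n_embd`.
class _AttrMapDemo:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd):
        self.n_embd = n_embd

    def __getattr__(self, name):  # only called when normal lookup fails
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

assert _AttrMapDemo(n_embd=768).hidden_size == 768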
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: List[str] = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
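# Hedged sketch (self-contained approximation of the behaviour these tests
# pin down, not the library's actual implementation): a file list is
# compatible when every torch ".bin" weight has a ".safetensors" counterpart,
# with an optional variant suffix such as "fp16" treated as interchangeable.
def _compat_demo(filenames, variant=None):
    def stem(name, ext):
        s = name[: -len(ext)]
        if variant and s.endswith(f".{variant}"):
            s = s[: -(len(variant) + 1)]
        return s

    st = {stem(f, ".safetensors") for f in filenames if f.endswith(".safetensors")}
    return all(stem(f, ".bin") in st for f in filenames if f.endswith(".bin"))

assert _compat_demo(["unet/diffusion_pytorch_model.bin",
                     "unet/diffusion_pytorch_model.safetensors"])
assert not _compat_demo(["unet/diffusion_pytorch_model.fp16.bin"], variant="fp16")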
| 7 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
A: Optional[int] = sys.version_info >= (3, 1_0)
def _UpperCAmelCase ( a : Tuple=None , a : List[str]=None ) -> Dict:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=a )
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : float
SCREAMING_SNAKE_CASE_ : str
SCREAMING_SNAKE_CASE_ : bool
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 4_2
SCREAMING_SNAKE_CASE_ : str = field(default='toto', metadata={'help': 'help message'} )
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : Optional[bool] = None
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 'titi'
SCREAMING_SNAKE_CASE_ : Any = 'toto'
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 'titi'
SCREAMING_SNAKE_CASE_ : Optional[int] = 'toto'
SCREAMING_SNAKE_CASE_ : List[Any] = 4_2
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : BasicEnum = "toto"
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : str = BasicEnum(self.foo )
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : MixedTypeEnum = "toto"
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Dict = MixedTypeEnum(self.foo )
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Optional[float] = field(default=UpperCAmelCase_, metadata={'help': 'help message'} )
SCREAMING_SNAKE_CASE_ : Optional[str] = None
SCREAMING_SNAKE_CASE_ : Optional[List[str]] = list_field(default=[] )
SCREAMING_SNAKE_CASE_ : Optional[List[int]] = list_field(default=[] )
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[int] = list_field(default=[] )
SCREAMING_SNAKE_CASE_ : List[int] = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE_ : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
SCREAMING_SNAKE_CASE_ : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[int] = field()
SCREAMING_SNAKE_CASE_ : str = field()
SCREAMING_SNAKE_CASE_ : BasicEnum = field()
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Dict = BasicEnum(self.required_enum )
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : "BasicEnum" = field()
SCREAMING_SNAKE_CASE_ : "Optional[bool]" = None
SCREAMING_SNAKE_CASE_ : "str" = field(default='toto', metadata={'help': 'help message'} )
SCREAMING_SNAKE_CASE_ : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : bool | None = None
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int | None = None
SCREAMING_SNAKE_CASE_ : float | None = field(default=UpperCAmelCase_, metadata={'help': 'help message'} )
SCREAMING_SNAKE_CASE_ : str | None = None
SCREAMING_SNAKE_CASE_ : list[str] | None = list_field(default=[] )
SCREAMING_SNAKE_CASE_ : list[int] | None = list_field(default=[] )
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowercase_ : List[str] = {k: v for k, v in vars(_lowercase ).items() if k != 'container'}
lowercase_ : List[str] = {k: v for k, v in vars(_lowercase ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , _lowercase ) and yy.get('choices' , _lowercase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](_lowercase ) , yy['type'](_lowercase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Optional[int] = HfArgumentParser(_lowercase )
lowercase_ : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_lowercase , required=_lowercase )
expected.add_argument('--bar' , type=_lowercase , required=_lowercase )
expected.add_argument('--baz' , type=_lowercase , required=_lowercase )
expected.add_argument('--flag' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='?' )
self.argparsersEqual(_lowercase , _lowercase )
lowercase_ : List[Any] = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
(lowercase_ ) : Any = parser.parse_args_into_dataclasses(_lowercase , look_for_args_file=_lowercase )
self.assertFalse(example.flag )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Union[str, Any] = HfArgumentParser(_lowercase )
lowercase_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=_lowercase )
expected.add_argument('--baz' , default='toto' , type=_lowercase , help='help message' )
self.argparsersEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Any = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='?' )
expected.add_argument('--baz' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counterpart
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=_lowercase , dest='baz' )
expected.add_argument('--opt' , type=_lowercase , default=_lowercase )
lowercase_ : Any = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowercase )
for dataclass_type in dataclass_types:
lowercase_ : List[str] = HfArgumentParser(_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
lowercase_ : Dict = parser.parse_args([] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
lowercase_ : Any = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
lowercase_ : Optional[Any] = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
lowercase_ : Any = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
lowercase_ : str = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Any = HfArgumentParser(_lowercase )
lowercase_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(_lowercase , _lowercase )
lowercase_ : Tuple = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
lowercase_ : List[str] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowercase_ : Any = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
lowercase_ : List[str] = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowercase_ : Optional[Any] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
lowercase_ : Any = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCamelCase__ ( self ) -> int:
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Literal["titi", "toto", 4_2] = "toto"
lowercase_ : Tuple = HfArgumentParser(_lowercase )
lowercase_ : Dict = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(_lowercase , _lowercase )
lowercase_ : Any = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
lowercase_ : Dict = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
lowercase_ : int = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Union[str, Any] = HfArgumentParser(_lowercase )
lowercase_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_lowercase )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_lowercase )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_lowercase )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
lowercase_ : int = parser.parse_args([] )
self.assertEqual(
_lowercase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
lowercase_ : Any = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(_lowercase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=_lowercase , type=_lowercase )
expected.add_argument('--bar' , default=_lowercase , type=_lowercase , help='help message' )
expected.add_argument('--baz' , default=_lowercase , type=_lowercase )
expected.add_argument('--ces' , nargs='+' , default=[] , type=_lowercase )
expected.add_argument('--des' , nargs='+' , default=[] , type=_lowercase )
lowercase_ : Any = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowercase )
for dataclass_type in dataclass_types:
lowercase_ : List[Any] = HfArgumentParser(_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
lowercase_ : Any = parser.parse_args([] )
self.assertEqual(_lowercase , Namespace(foo=_lowercase , bar=_lowercase , baz=_lowercase , ces=[] , des=[] ) )
lowercase_ : Optional[Any] = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(_lowercase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : List[str] = HfArgumentParser(_lowercase )
lowercase_ : Dict = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=_lowercase , required=_lowercase )
expected.add_argument('--required_str' , type=_lowercase , required=_lowercase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_lowercase , )
self.argparsersEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Tuple = HfArgumentParser(_lowercase )
lowercase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_lowercase , required=_lowercase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_lowercase , )
expected.add_argument('--opt' , type=_lowercase , default=_lowercase )
expected.add_argument('--baz' , default='toto' , type=_lowercase , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_lowercase )
self.argparsersEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Optional[int] = HfArgumentParser(_lowercase )
lowercase_ : Dict = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
lowercase_ : List[str] = parser.parse_dict(_lowercase )[0]
lowercase_ : List[Any] = BasicExample(**_lowercase )
self.assertEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Dict = HfArgumentParser(_lowercase )
lowercase_ : Optional[int] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(_lowercase , parser.parse_dict , _lowercase , allow_extra_keys=_lowercase )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Dict = HfArgumentParser(_lowercase )
lowercase_ : List[str] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Any = os.path.join(_lowercase , 'temp_json' )
os.mkdir(_lowercase )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(_lowercase , _lowercase )
lowercase_ : int = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
lowercase_ : List[str] = BasicExample(**_lowercase )
self.assertEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : List[str] = HfArgumentParser(_lowercase )
lowercase_ : List[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : str = os.path.join(_lowercase , 'temp_yaml' )
os.mkdir(_lowercase )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(_lowercase , _lowercase )
lowercase_ : List[Any] = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
lowercase_ : Optional[Any] = BasicExample(**_lowercase )
self.assertEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : str = HfArgumentParser(_lowercase )
self.assertIsNotNone(_lowercase )
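# Hedged sketch (illustrative usage, outside the test class above): the
# end-to-end flow these tests exercise. Declare a dataclass, build a parser
# from it, and parse CLI-style arguments back into an instance.
if __name__ == "__main__":

    @dataclass
    class _DemoArgs:
        foo: int = 1
        baz: str = field(default="toto", metadata={"help": "help message"})

    (_demo,) = HfArgumentParser(_DemoArgs).parse_args_into_dataclasses(["--foo", "2"])
    assert _demo.foo == 2 and _demo.baz == "toto"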
| 721 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(a , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : List[Any] = f.readlines()
lowercase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
lowercase_ : Dict = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
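# Hedged sketch (self-contained): the in-memory text edit the function above
# performs on custom.js, shown on an illustrative JS fragment: bump the
# stable version line, then append a new entry before the mapping's brace.
_demo_js = [
    'const stableVersion = "v4.29.0"\n',
    "const versionMapping = {\n",
    '    "v4.29.0": "v4.29.0",\n',
    "}\n",
]
_new = "4.30.0"
_demo_js[0] = f'const stableVersion = "v{_new}"\n'
_demo_js[-2] += f'    "v{_new}": "v{_new}",\n'  # lines[index - 1] += ... above
assert _demo_js[0].endswith(f'"v{_new}"\n') and f'"v{_new}": "v{_new}",' in _demo_js[-2]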
if __name__ == "__main__":
A: str = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
A: List[str] = parser.parse_args()
update_custom_js(args.version)
| 7 | 0 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
A: str = logging.get_logger(__name__)
def _UpperCAmelCase ( a : List[str] , a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : int = set()
lowercase_ : int = []
def parse_line(a : Tuple ):
for line in fp:
if isinstance(a , a ):
lowercase_ : Dict = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(a ) > 0:
lowercase_ : Optional[Any] = '\n'.join(a )
# Only keep the warnings specified in `targets`
if any(f": {x}: " in warning for x in targets ):
selected_warnings.add(a )
buffer.clear()
continue
else:
lowercase_ : Optional[int] = line.strip()
buffer.append(a )
if from_gh:
for filename in os.listdir(a ):
lowercase_ : Dict = os.path.join(a , a )
if not os.path.isdir(a ):
# read the file
if filename != "warnings.txt":
continue
with open(a ) as fp:
parse_line(a )
else:
try:
with zipfile.ZipFile(a ) as z:
for filename in z.namelist():
if not os.path.isdir(a ):
# read the file
if filename != "warnings.txt":
continue
with z.open(a ) as fp:
parse_line(a )
except Exception:
logger.warning(
f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
return selected_warnings
def _UpperCAmelCase ( a : str , a : int ) -> Dict:
"""simple docstring"""
lowercase_ : int = set()
lowercase_ : Union[str, Any] = [os.path.join(a , a ) for p in os.listdir(a ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(a , a ) )
return selected_warnings
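# Hedged sketch (self-contained approximation of parse_line above): warning
# blocks are the runs of indented lines between non-indented lines, and a
# block is kept only if it mentions one of the target warning types.
def _group_warnings_demo(lines, targets=("DeprecationWarning",)):
    selected, buffer = set(), []

    def flush():
        if buffer and any(f": {t}: " in w for t in targets for w in buffer):
            selected.add("\n".join(buffer))
        buffer.clear()

    for line in lines:
        if line.startswith(" "):
            buffer.append(line.strip())
        else:
            flush()
    flush()
    return selected

_demo_log = [
    "tests/test_x.py::test_a",
    "  /src/old.py:3: DeprecationWarning: old api",
    "tests/test_x.py::test_b",
    "  /src/ok.py:7: UserWarning: fine",
]
assert _group_warnings_demo(_demo_log) == {"/src/old.py:3: DeprecationWarning: old api"}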
if __name__ == "__main__":
def _UpperCAmelCase ( a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return values.split(',' )
A: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
A: Dict = parser.parse_args()
A: Optional[Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
A: Dict = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
A: Optional[Any] = extract_warnings(args.output_dir, args.targets)
A: Any = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 700 |
'''simple docstring'''
def _UpperCAmelCase ( a : list[list[float]] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(a ):
if len(a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(a ) )
return data_lists
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for dlist, weight in zip(a , a ):
lowercase_ : Tuple = min(a )
lowercase_ : Any = max(a )
lowercase_ : list[float] = []
# for weight 0, the score is 1 - the normalized value (lower raw values rank higher)
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowercase_ : str = f"Invalid weight of {weight:f} provided"
raise ValueError(a )
score_lists.append(a )
return score_lists
def _UpperCAmelCase ( a : list[list[float]] ) -> list[float]:
"""simple docstring"""
lowercase_ : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(a ):
lowercase_ : List[Any] = final_scores[j] + ele
return final_scores
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : int = get_data(a )
lowercase_ : Optional[int] = calculate_each_score(a , a )
lowercase_ : Dict = generate_final_scores(a )
# append scores to source data
for i, ele in enumerate(a ):
source_data[i].append(a )
return source_data
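# Hedged sketch (self-contained, mirroring the pipeline above): min-max
# normalise each column, invert columns whose weight is 0, and sum per row.
def _score_rows_demo(source_data, weights):
    cols = list(zip(*source_data))
    totals = [0.0] * len(source_data)
    for col, w in zip(cols, weights):
        lo, hi = min(col), max(col)
        for i, v in enumerate(col):
            norm = 0.0 if hi == lo else (v - lo) / (hi - lo)
            totals[i] += norm if w == 1 else 1 - norm
    return totals

# two candidates scored on (price, rating); price weight 0 = lower is better
assert _score_rows_demo([[10, 4.0], [20, 5.0]], [0, 1]) == [1.0, 1.0]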
| 7 | 0 |
'''simple docstring'''
from collections.abc import Callable
def _UpperCAmelCase ( a : Callable[[float], float] , a : float , a : float ) -> float:
"""simple docstring"""
lowercase_ : float = a
lowercase_ : float = b
if function(a ) == 0: # one of the a or b is a root for the function
return a
elif function(a ) == 0:
return b
elif (
function(a ) * function(a ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
lowercase_ : float = start + (end - start) / 2.0
while abs(start - mid ) > 1_0**-7: # until the bracketing interval is narrower than 10^-7
if function(a ) == 0:
return mid
elif function(a ) * function(a ) < 0:
lowercase_ : Dict = mid
else:
lowercase_ : Optional[Any] = mid
lowercase_ : Any = start + (end - start) / 2.0
return mid
def _UpperCAmelCase ( a : float ) -> float:
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 701 |
'''simple docstring'''
def _UpperCAmelCase ( a : int , a : int ) -> int:
"""simple docstring"""
while second != 0:
lowercase_ : Any = first & second
first ^= second
lowercase_ : List[str] = c << 1
return first
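# Hedged sketch (self-contained, since the loop above lost its carry variable
# to obfuscation): the intended carry-propagation addition, valid for
# non-negative integers (Python's unbounded negatives would loop forever).
def _add_without_plus(first: int, second: int) -> int:
    while second != 0:
        carry = first & second  # bits that would overflow
        first ^= second         # bitwise sum without carry
        second = carry << 1     # carry shifts into the next position
    return first

assert _add_without_plus(3, 5) == 8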
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
A: Optional[Any] = logging.get_logger(__name__)
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ['input_features', 'attention_mask']
def __init__( self , _lowercase=80 , _lowercase=1_6000 , _lowercase=0.0 , _lowercase=10 , _lowercase=25 , _lowercase="hamming_window" , _lowercase=3_2768.0 , _lowercase=0.97 , _lowercase=1.0 , _lowercase=True , _lowercase=True , _lowercase=False , **_lowercase , ) -> int:
super().__init__(feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , **_lowercase )
lowercase_ : str = feature_size
lowercase_ : Optional[Any] = sampling_rate
lowercase_ : str = padding_value
lowercase_ : Optional[int] = hop_length
lowercase_ : Optional[int] = win_length
lowercase_ : Optional[int] = frame_signal_scale
lowercase_ : List[Any] = preemphasis_coeff
lowercase_ : str = mel_floor
lowercase_ : Tuple = normalize_means
lowercase_ : List[Any] = normalize_vars
lowercase_ : Optional[int] = win_function
lowercase_ : List[Any] = return_attention_mask
lowercase_ : Optional[int] = win_length * sampling_rate // 1000
lowercase_ : Dict = hop_length * sampling_rate // 1000
lowercase_ : Union[str, Any] = optimal_fft_length(self.sample_size )
lowercase_ : str = (self.n_fft // 2) + 1
def lowerCamelCase__ ( self , _lowercase ) -> np.ndarray:
if self.win_function == "hamming_window":
lowercase_ : Any = window_function(window_length=self.sample_size , name=self.win_function , periodic=_lowercase )
else:
lowercase_ : Optional[int] = window_function(window_length=self.sample_size , name=self.win_function )
lowercase_ : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
lowercase_ : List[Any] = spectrogram(
one_waveform * self.frame_signal_scale , window=_lowercase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=_lowercase , preemphasis=self.preemphasis_coeff , mel_filters=_lowercase , mel_floor=self.mel_floor , log_mel='log' , )
return msfc_features.T
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
# make sure we normalize float32 arrays
if self.normalize_means:
lowercase_ : Any = x[:input_length].mean(axis=0 )
lowercase_ : Optional[Any] = np.subtract(_lowercase , _lowercase )
if self.normalize_vars:
lowercase_ : List[Any] = x[:input_length].std(axis=0 )
lowercase_ : List[str] = np.divide(_lowercase , _lowercase )
if input_length < x.shape[0]:
lowercase_ : List[Any] = padding_value
# make sure array is in float32
lowercase_ : Optional[Any] = x.astype(np.floataa )
return x
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[np.ndarray]:
lowercase_ : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(_lowercase , _lowercase , self.padding_value ) for x, n in zip(_lowercase , _lowercase )]
def __call__( self , _lowercase , _lowercase = False , _lowercase = None , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , **_lowercase , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowercase_ : Optional[Any] = isinstance(_lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
lowercase_ : Optional[Any] = is_batched_numpy or (
isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase_ : str = [np.asarray(_lowercase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_lowercase , np.ndarray ):
lowercase_ : str = np.asarray(_lowercase , dtype=np.floataa )
elif isinstance(_lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase_ : List[str] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ : List[Any] = [raw_speech]
# extract MFSC (mel filter bank) features
lowercase_ : str = [self._extract_mfsc_features(_lowercase ) for one_waveform in raw_speech]
# convert into correct format for padding
lowercase_ : Any = BatchFeature({'input_features': features} )
lowercase_ : Any = self.pad(
_lowercase , padding=_lowercase , max_length=_lowercase , truncation=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
# make sure list is in array format
lowercase_ : str = padded_inputs.get('input_features' )
if isinstance(input_features[0] , _lowercase ):
lowercase_ : Optional[Any] = [np.asarray(_lowercase , dtype=np.floataa ) for feature in input_features]
lowercase_ : str = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowercase_ : Dict = [np.asarray(_lowercase , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowercase_ : Optional[int] = (
np.array(_lowercase , dtype=np.intaa )
if self._get_padding_strategies(_lowercase , max_length=_lowercase ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowercase_ : Any = self.normalize(
padded_inputs['input_features'] , attention_mask=_lowercase )
if return_tensors is not None:
lowercase_ : Optional[int] = padded_inputs.convert_to_tensors(_lowercase )
return padded_inputs
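# Hedged sketch (illustrative only): the frame arithmetic the extractor above
# derives from milliseconds. With 16 kHz audio, a 25 ms window and a 10 ms
# hop give 400-sample frames advanced 160 samples at a time.
def _frame_params_demo(sampling_rate=16_000, win_length_ms=25, hop_length_ms=10):
    sample_size = win_length_ms * sampling_rate // 1000
    sample_stride = hop_length_ms * sampling_rate // 1000
    return sample_size, sample_stride

assert _frame_params_demo() == (400, 160)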
| 702 |
'''simple docstring'''
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
lowercase_ : Dict = n
lowercase_ : Dict = [None] * self.n
lowercase_ : Tuple = 0 # index of the first element
lowercase_ : List[Any] = 0
lowercase_ : List[Any] = 0
def __len__( self ) -> int:
return self.size
def lowerCamelCase__ ( self ) -> bool:
return self.size == 0
def lowerCamelCase__ ( self ) -> List[Any]:
return False if self.is_empty() else self.array[self.front]
def lowerCamelCase__ ( self , _lowercase ) -> Any:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
lowercase_ : Tuple = data
lowercase_ : List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def lowerCamelCase__ ( self ) -> Any:
if self.size == 0:
raise Exception('UNDERFLOW' )
lowercase_ : Dict = self.array[self.front]
lowercase_ : Tuple = None
lowercase_ : int = (self.front + 1) % self.n
self.size -= 1
return temp
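# A minimal usage sketch (hypothetical method names -- the defs above are
# obfuscated to `lowerCamelCase__`; assuming the placeholder assignments
# stand in for `self.array[self.rear] = data` and the front/rear pointer
# updates, this is a fixed-capacity ring buffer):
#
#   q = __magic_name__(3)    # capacity 3
#   q.enqueue(1).enqueue(2)  # enqueue returns self, so calls can be chained
#   q.dequeue()              # -> 1, freeing the slot at the front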
| 7 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A: Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase_ )
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
super().__init__(*_lowercase , **_lowercase )
requires_backends(self , 'vision' )
self.check_model_type(_lowercase )
def __call__( self , _lowercase , **_lowercase ) -> int:
return super().__call__(_lowercase , **_lowercase )
def lowerCamelCase__ ( self , **_lowercase ) -> Optional[int]:
return {}, {}, {}
def lowerCamelCase__ ( self , _lowercase ) -> Union[str, Any]:
lowercase_ : List[Any] = load_image(_lowercase )
lowercase_ : str = image.size
lowercase_ : Union[str, Any] = self.image_processor(images=_lowercase , return_tensors=self.framework )
return model_inputs
def lowerCamelCase__ ( self , _lowercase ) -> Optional[Any]:
lowercase_ : Union[str, Any] = self.model(**_lowercase )
return model_outputs
def lowerCamelCase__ ( self , _lowercase ) -> Tuple:
lowercase_ : int = model_outputs.predicted_depth
lowercase_ : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=_lowercase )
lowercase_ : str = prediction.squeeze().cpu().numpy()
lowercase_ : List[str] = (output * 255 / np.max(_lowercase )).astype('uint8' )
lowercase_ : Union[str, Any] = Image.fromarray(_lowercase )
lowercase_ : List[Any] = {}
lowercase_ : Dict = predicted_depth
lowercase_ : Optional[Any] = depth
return output_dict
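# A minimal usage sketch (assumes the class above is registered as the
# "depth-estimation" pipeline; "Intel/dpt-large" is one commonly used
# checkpoint, not implied by this file):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"]            # PIL.Image, depth rescaled to the 0-255 range
#   result["predicted_depth"]  # raw torch.Tensor prediction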
| 703 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def _UpperCAmelCase ( a : Tuple , a : Union[str, Any] , a : List[Any]=8 ) -> Dict:
"""simple docstring"""
lowercase_ : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
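# For illustration: with scale_factor=8 (so scale_factor**2 == 64),
# downscale_height_and_width(512, 768, 8) -> (64, 96), i.e. each dimension
# becomes ceil(dim / scale_factor**2) * scale_factor -- the latent grid
# size consumed by the UNet before the MoVQ decoder upscales it again.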
def _UpperCAmelCase ( a : Any , a : Dict=5_1_2 , a : Optional[Any]=5_1_2 ) -> Tuple:
"""simple docstring"""
lowercase_ : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : int = np.array(pil_image.convert('RGB' ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 1_27.5 - 1
lowercase_ : Any = np.transpose(a , [2, 0, 1] )
lowercase_ : Any = torch.from_numpy(a ).unsqueeze(0 )
return image
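# For illustration: prepare_image(pil_image, 512, 512) returns a float32
# tensor of shape (1, 3, 512, 512) with pixels rescaled from [0, 255] to
# [-1, 1] via x / 127.5 - 1, the range the MoVQ encoder is fed below.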
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 7 | 0 |
'''simple docstring'''
import requests
A: Tuple = "YOUR API KEY"
def _UpperCAmelCase ( a : str , a : str = giphy_api_key ) -> list:
"""simple docstring"""
lowercase_ : Dict = '+'.join(query.split() )
lowercase_ : str = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
lowercase_ : str = requests.get(a ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A: int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( a : list ) -> list:
"""simple docstring"""
for i in range(len(a ) - 1 , 0 , -1 ):
lowercase_ : Any = False
for j in range(a , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
                lowercase_ , lowercase_ : Any = unsorted[j - 1], unsorted[j]
lowercase_ : int = True
for j in range(a ):
if unsorted[j] > unsorted[j + 1]:
                lowercase_ , lowercase_ : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowercase_ : Optional[Any] = True
if not swapped:
break
return unsorted
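# Expected behaviour (assuming the placeholder assignments above stand in
# for the usual in-place swaps): each outer iteration makes one backward
# and one forward pass, so cocktail_shaker_sort([4, 5, 2, 1, 2]) returns
# [1, 2, 2, 4, 5].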
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
A: Tuple = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 705 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A: Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
A: List[Any] = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase ) -> Tuple:
return FSMTTokenizer.from_pretrained(_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
lowercase_ : str = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(_lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
lowercase_ : Optional[Any] = f"facebook/wmt19-{pair}"
lowercase_ : str = self.get_tokenizer(_lowercase )
lowercase_ : Any = self.get_model(_lowercase )
lowercase_ : Any = bleu_data[pair]['src']
lowercase_ : Any = bleu_data[pair]['tgt']
lowercase_ : Dict = tokenizer(_lowercase , return_tensors='pt' , truncation=_lowercase , padding='longest' ).to(_lowercase )
lowercase_ : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowercase_ : Any = tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
lowercase_ : Union[str, Any] = calculate_bleu(_lowercase , _lowercase )
print(_lowercase )
self.assertGreaterEqual(scores['bleu'] , _lowercase )
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( a : str , a : str ) -> bool:
"""simple docstring"""
lowercase_ : Union[str, Any] = get_failure_array(a )
# 2) Step through text searching for pattern
    lowercase_ , lowercase_ : Dict = 0, 0 # index into text, pattern
while i < len(a ):
if pattern[j] == text[i]:
if j == (len(a ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
lowercase_ : Optional[Any] = failure[j - 1]
continue
i += 1
return False
def _UpperCAmelCase ( a : str ) -> list[int]:
"""simple docstring"""
lowercase_ : int = [0]
lowercase_ : List[Any] = 0
lowercase_ : Union[str, Any] = 1
while j < len(a ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
lowercase_ : Dict = failure[i - 1]
continue
j += 1
failure.append(a )
return failure
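# Worked example: get_failure_array("AAAB") returns [0, 1, 2, 0].
# failure[j] is the length of the longest proper prefix of the pattern that
# is also a suffix of pattern[: j + 1]; on the final 'B' the index i falls
# back through failure[1] and failure[0] before 0 is appended.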
if __name__ == "__main__":
# Test 1)
A: Optional[int] = "abc1abc12"
A: Optional[int] = "alskfjaldsabc1abc1abc12k23adsfabcabc"
A: List[Any] = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
A: List[Any] = "ABABX"
A: List[Any] = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
A: Union[str, Any] = "AAAB"
A: Union[str, Any] = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
A: Optional[int] = "abcdabcy"
A: Union[str, Any] = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
A: Tuple = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: int = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: Optional[Any] = logging.get_logger(__name__)
A: List[str] = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
A: Dict = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def _UpperCAmelCase ( a : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase_ : int = torch.load(a , map_location='cpu' )
return sd
def _UpperCAmelCase ( a : List[Any] , a : Tuple , a : Tuple=rename_keys_prefix ) -> List[str]:
"""simple docstring"""
lowercase_ : Dict = OrderedDict()
lowercase_ : Tuple = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowercase_ : Optional[Any] = key
for name_pair in rename_keys_prefix:
lowercase_ : Optional[Any] = new_key.replace(name_pair[0] , name_pair[1] )
lowercase_ : int = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
lowercase_ : List[Any] = new_d['cls.predictions.bias']
return new_d
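# For illustration (hypothetical checkpoint key): with the (old, new) pairs
# in rename_keys_prefix above, an entry such as
# "bert.bert.encoder.layer.0.attention.self.query.weight" is copied into
# the new state dict as
# "visual_bert.encoder.layer.0.attention.self.query.weight".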
@torch.no_grad()
def _UpperCAmelCase ( a : Optional[Any] , a : Any ) -> Any:
"""simple docstring"""
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
lowercase_ : Any = 'pretraining'
if "vcr" in checkpoint_path:
lowercase_ : Dict = {'visual_embedding_dim': 5_1_2}
elif "vqa_advanced" in checkpoint_path:
lowercase_ : Tuple = {'visual_embedding_dim': 2_0_4_8}
elif "vqa" in checkpoint_path:
lowercase_ : List[Any] = {'visual_embedding_dim': 2_0_4_8}
elif "nlvr" in checkpoint_path:
lowercase_ : Optional[int] = {'visual_embedding_dim': 1_0_2_4}
else:
raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
lowercase_ : Tuple = {'visual_embedding_dim': 5_1_2}
lowercase_ : List[Any] = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
lowercase_ : Tuple = {'visual_embedding_dim': 2_0_4_8}
lowercase_ : str = 'vqa_advanced'
elif "vqa" in checkpoint_path:
lowercase_ : List[str] = {'visual_embedding_dim': 2_0_4_8, 'num_labels': 3_1_2_9}
lowercase_ : Optional[Any] = 'vqa'
elif "nlvr" in checkpoint_path:
lowercase_ : int = {
'visual_embedding_dim': 1_0_2_4,
'num_labels': 2,
}
lowercase_ : List[Any] = 'nlvr'
lowercase_ : Any = VisualBertConfig(**a )
# Load State Dict
lowercase_ : Any = load_state_dict(a )
lowercase_ : Optional[Any] = get_new_dict(a , a )
if model_type == "pretraining":
lowercase_ : Union[str, Any] = VisualBertForPreTraining(a )
elif model_type == "vqa":
lowercase_ : int = VisualBertForQuestionAnswering(a )
elif model_type == "nlvr":
lowercase_ : Union[str, Any] = VisualBertForVisualReasoning(a )
elif model_type == "multichoice":
lowercase_ : List[Any] = VisualBertForMultipleChoice(a )
model.load_state_dict(a )
# Save Checkpoints
Path(a ).mkdir(exist_ok=a )
model.save_pretrained(a )
if __name__ == "__main__":
A: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
A: Union[str, Any] = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
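# Example invocation (paths are placeholders; the checkpoint file name must
# be one of ACCEPTABLE_CHECKPOINTS above, and both arguments are positional;
# the script file name below is illustrative):
#
#   python convert_visual_bert_checkpoint.py nlvr2_fine_tuned.th ./visual-bert-nlvr2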
| 707 |
'''simple docstring'''
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
lowercase_ : Dict = 0
# if input_string is "aba" than new_input_string become "a|b|a"
lowercase_ : Dict = ''
lowercase_ : Any = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
lowercase_ , lowercase_ : Dict = 0, 0
# length[i] shows the length of palindromic substring with center i
lowercase_ : List[Any] = [1 for i in range(len(a ) )]
# for each character in new_string find corresponding palindromic string
lowercase_ : Dict = 0
for j in range(len(a ) ):
lowercase_ : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase_ : int = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
lowercase_ : Tuple = j - k + 1 # noqa: E741
lowercase_ : Tuple = j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase_ : Tuple = length[j]
lowercase_ : List[Any] = j
# create that string
lowercase_ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
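# Worked example (hypothetical input): for "abacab" the interleaved string
# is "a|b|a|c|a|b"; the widest palindrome is centred on 'c', and stripping
# the '|' separators yields "bacab".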
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ShapEPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = ['prompt']
SCREAMING_SNAKE_CASE_ : Tuple = ['prompt']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE_ : int = False
@property
def snake_case__ ( self ) -> Dict:
return 32
@property
def snake_case__ ( self ) -> int:
return 32
@property
def snake_case__ ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def snake_case__ ( self ) -> str:
return 8
@property
def snake_case__ ( self ) -> Tuple:
lowercase_ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def snake_case__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_lowercase )
@property
def snake_case__ ( self ) -> Any:
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
lowercase_ : Union[str, Any] = PriorTransformer(**_lowercase )
return model
@property
def snake_case__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
lowercase_ : List[str] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
lowercase_ : Dict = ShapERenderer(**_lowercase )
return model
def snake_case__ ( self ) -> Tuple:
lowercase_ : Optional[int] = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_text_encoder
lowercase_ : Optional[int] = self.dummy_tokenizer
lowercase_ : List[Any] = self.dummy_renderer
lowercase_ : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_lowercase , clip_sample=_lowercase , clip_sample_range=1.0 , )
lowercase_ : str = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def snake_case__ ( self , _lowercase , _lowercase=0 ) -> Tuple:
if str(_lowercase ).startswith('mps' ):
lowercase_ : Union[str, Any] = torch.manual_seed(_lowercase )
else:
lowercase_ : int = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
lowercase_ : Union[str, Any] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def snake_case__ ( self ) -> Dict:
lowercase_ : Optional[Any] = 'cpu'
lowercase_ : List[Any] = self.get_dummy_components()
lowercase_ : Optional[Any] = self.pipeline_class(**_lowercase )
lowercase_ : List[str] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase_ : Dict = pipe(**self.get_dummy_inputs(_lowercase ) )
lowercase_ : Dict = output.images[0]
lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase_ : int = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case__ ( self ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case__ ( self ) -> Union[str, Any]:
lowercase_ : str = torch_device == 'cpu'
lowercase_ : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowercase , relax_max_difference=_lowercase , )
def snake_case__ ( self ) -> Optional[Any]:
lowercase_ : int = self.get_dummy_components()
lowercase_ : Any = self.pipeline_class(**_lowercase )
lowercase_ : Optional[int] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase_ : List[Any] = 1
lowercase_ : int = 2
lowercase_ : Union[str, Any] = self.get_dummy_inputs(_lowercase )
for key in inputs.keys():
if key in self.batch_params:
lowercase_ : List[Any] = batch_size * [inputs[key]]
lowercase_ : Tuple = pipe(**_lowercase , num_images_per_prompt=_lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ) -> Union[str, Any]:
lowercase_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
lowercase_ : int = ShapEPipeline.from_pretrained('openai/shap-e' )
lowercase_ : str = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase_ : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
lowercase_ : Union[str, Any] = pipe(
'a shark' , generator=_lowercase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 708 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
def lowerCamelCase__ ( self ) -> Optional[Any]:
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple:
self._get_dirs()
lowercase_ : str = self._strip_protocol(_lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
self._get_dirs()
lowercase_ : List[str] = PurePosixPath(path.strip('/' ) )
lowercase_ : List[str] = {}
for p, f in self.dir_cache.items():
lowercase_ : Tuple = PurePosixPath(p.strip('/' ) )
lowercase_ : Optional[int] = p.parent
if root == path:
lowercase_ : List[str] = f
lowercase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
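# A minimal usage sketch (hypothetical: the keyword names repo_info/token
# mirror the obfuscated __init__ parameters above, and "train.csv" is a
# placeholder filename; DatasetInfo comes from huggingface_hub):
#
#   fs = __magic_name__(repo_info=dataset_info, token=None)
#   fs.ls("", detail=False)          # top-level files/dirs of the repo
#   with fs.open("train.csv") as f:
#       data = f.read()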
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A: Dict = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
A: List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
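# For illustration: with the rules built above, the original key
# "transformer.blocks.0.norm1.weight" is renamed to
# "vilt.encoder.layer.0.layernorm_before.weight" when the state dict is
# remapped below.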
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : Optional[int] = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = in_proj_bias[: config.hidden_size]
lowercase_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(a , a )
def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = dct.pop(a )
lowercase_ : Dict = val
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
        lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
        lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
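# Example invocation (the default --checkpoint_url converts the MLM+ITM
# pre-trained weights; the dump folder is a placeholder, and the script
# file name below is illustrative):
#
#   python convert_vilt_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm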
| 7 | 0 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=64 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , _lowercase=2 , _lowercase=2 , _lowercase=2 , _lowercase=2 , _lowercase=4 , _lowercase=1 , ) -> Dict:
lowercase_ : Tuple = parent
lowercase_ : Any = batch_size
lowercase_ : Dict = seq_length
lowercase_ : int = is_training
lowercase_ : Tuple = use_input_mask
lowercase_ : Tuple = use_token_type_ids
lowercase_ : List[Any] = use_labels
lowercase_ : List[Any] = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Tuple = intermediate_size
lowercase_ : str = hidden_act
lowercase_ : str = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : List[str] = max_position_embeddings
lowercase_ : Any = type_vocab_size
lowercase_ : str = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = num_labels
lowercase_ : Dict = num_choices
lowercase_ : Union[str, Any] = scope
lowercase_ : Any = q_groups
lowercase_ : Dict = k_groups
lowercase_ : Optional[int] = v_groups
lowercase_ : str = post_attention_groups
lowercase_ : Union[str, Any] = intermediate_groups
lowercase_ : Any = output_groups
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Dict = None
if self.use_input_mask:
lowercase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Dict = None
lowercase_ : int = None
lowercase_ : List[str] = None
if self.use_labels:
lowercase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ) -> Union[str, Any]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
lowercase_ : Tuple = SqueezeBertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : List[Any] = model(_lowercase , _lowercase )
lowercase_ : Tuple = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
lowercase_ : Dict = SqueezeBertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : List[str] = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : Optional[Any] = SqueezeBertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Optional[int] = model(
_lowercase , attention_mask=_lowercase , start_positions=_lowercase , end_positions=_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
lowercase_ : Tuple = self.num_labels
lowercase_ : Any = SqueezeBertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : List[str] = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : str = self.num_labels
lowercase_ : Any = SqueezeBertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Dict = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
lowercase_ : Any = self.num_choices
lowercase_ : Any = SqueezeBertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Dict = model(
_lowercase , attention_mask=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : str = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[int] = config_and_inputs
lowercase_ : str = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
SCREAMING_SNAKE_CASE_ : List[str] = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = False
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Tuple = SqueezeBertModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=_lowercase , dim=37 )
def lowerCamelCase__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ) -> int:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_lowercase )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_lowercase )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_lowercase )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_lowercase )
@slow
def lowerCamelCase__ ( self ) -> Any:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Tuple = SqueezeBertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_sentencepiece
@require_tokenizers
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Union[str, Any] = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
lowercase_ : Union[str, Any] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
lowercase_ : Optional[int] = model(_lowercase )[0]
lowercase_ : str = torch.Size((1, 3) )
self.assertEqual(output.shape , _lowercase )
lowercase_ : str = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-4 ) )
| 710 |
'''simple docstring'''
def _UpperCAmelCase ( a : list ) -> list:
"""simple docstring"""
for i in range(len(a ) - 1 , 0 , -1 ):
lowercase_ : Any = False
for j in range(a , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase_ , lowercase_ : Any = unsorted[j - 1], unsorted[j]
lowercase_ : int = True
for j in range(a ):
if unsorted[j] > unsorted[j + 1]:
lowercase_ , lowercase_ : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowercase_ : Optional[Any] = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
A: Tuple = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7 | 0 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
A: List[Any] = logging.get_logger(__name__)
A: List[Any] = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase=None , **_lowercase ) -> int:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
lowercase_ : Optional[Any] = model
lowercase_ : str = kwargs.get('model_save_dir' , _lowercase )
lowercase_ : Union[str, Any] = kwargs.get('latest_model_name' , _lowercase )
def __call__( self , **_lowercase ) -> Optional[Any]:
lowercase_ : str = {k: np.array(_lowercase ) for k, v in kwargs.items()}
return self.model.run(_lowercase , _lowercase )
    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download)
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs)
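# Minimal usage sketch (hypothetical Hub id and input name):
# model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model")   # resolves model.onnx via the Hub
# out = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))         # kwargs become ONNX session inputs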
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ['transformers', 'torch', 'note_seq']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 7 | 0 |
def selection_sort(collection: list) -> list:
    """simple docstring"""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
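# Worked example: selection_sort([3, 1, 2]) returns [1, 2, 3]; the sort is in-place
# and always performs O(n^2) comparisons regardless of input order.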
| 712 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """simple docstring"""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
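# Worked example: jaro_winkler("hello", "world") evaluates to 0.4666666666666666
# (one matched character "l", zero transpositions, no common prefix).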
| 7 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
A: Dict = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1_0_2_4,
    "facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def load_vocab_file(vocab_file):
    """simple docstring"""
    with open(vocab_file, 'r') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[str] = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False) -> int:
        return len(self._id_to_token)

    def get_vocab(self) -> dict:
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
        with open(vocab_file, 'w') as f:
            f.write('\n'.join(self.all_tokens))
        return (vocab_file,)
    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
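# Minimal usage sketch (assumes a local vocab.txt with one token per line):
# tok = __magic_name__(vocab_file="vocab.txt")  # the ESM tokenizer class defined above
# ids = tok.convert_tokens_to_ids("M K T".split())
# tok.build_inputs_with_special_tokens(ids)     # prepends <cls> and appends <eos>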
| 713 |
'''simple docstring'''
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    """simple docstring"""
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
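# Worked example: make_matrix(2) returns [[1, 2], [3, 4]] and
# rotate_90(make_matrix(2)) returns [[2, 4], [1, 3]] (90 degrees counterclockwise).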
| 7 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        prefix = 'vilt.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 714 |
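# Example invocation (script filename assumed):
# python convert_vilt_original_to_pytorch.py \
#     --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#     --pytorch_dump_folder_path ./vilt-b32-mlm-itm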
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """simple docstring"""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    readme = lowercase_  # the model card text assembled above
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f"Generating {path}")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
A: Optional[int] = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 'transfo-xl'
SCREAMING_SNAKE_CASE_ : int = ['mems']
SCREAMING_SNAKE_CASE_ : Tuple = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowercase=26_7735 , _lowercase=[2_0000, 4_0000, 20_0000] , _lowercase=1024 , _lowercase=1024 , _lowercase=16 , _lowercase=64 , _lowercase=4096 , _lowercase=4 , _lowercase=False , _lowercase=18 , _lowercase=1600 , _lowercase=1000 , _lowercase=True , _lowercase=True , _lowercase=0 , _lowercase=-1 , _lowercase=True , _lowercase=0.1 , _lowercase=0.0 , _lowercase=True , _lowercase="normal" , _lowercase=0.01 , _lowercase=0.01 , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0 , **_lowercase , ) -> Optional[Any]:
lowercase_ : Union[str, Any] = vocab_size
lowercase_ : str = []
self.cutoffs.extend(_lowercase )
if proj_share_all_but_first:
lowercase_ : List[Any] = [False] + [True] * len(self.cutoffs )
else:
lowercase_ : List[str] = [False] + [False] * len(self.cutoffs )
lowercase_ : str = d_model
lowercase_ : int = d_embed
lowercase_ : Dict = d_head
lowercase_ : Optional[int] = d_inner
lowercase_ : int = div_val
lowercase_ : List[str] = pre_lnorm
lowercase_ : Any = n_layer
lowercase_ : Dict = n_head
lowercase_ : Tuple = mem_len
lowercase_ : Optional[int] = same_length
lowercase_ : List[str] = attn_type
lowercase_ : Optional[int] = clamp_len
lowercase_ : int = sample_softmax
lowercase_ : List[str] = adaptive
lowercase_ : List[str] = dropout
lowercase_ : Union[str, Any] = dropatt
lowercase_ : int = untie_r
lowercase_ : Optional[Any] = init
lowercase_ : Union[str, Any] = init_range
lowercase_ : Any = proj_init_std
lowercase_ : List[Any] = init_std
lowercase_ : Optional[Any] = layer_norm_epsilon
super().__init__(eos_token_id=_lowercase , **_lowercase )
@property
def lowerCamelCase__ ( self ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def lowerCamelCase__ ( self , _lowercase ) -> Tuple:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit." )
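# Note on the attribute_map above: generic code can read config.num_hidden_layers and
# transparently get config.n_layer (18 by default), or config.hidden_size for config.d_model (1024).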
| 715 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """simple docstring"""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def set_seed(args):
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
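# Typical wiring (hypothetical argparse namespace):
# args = argparse.Namespace(seed=56, n_gpu=0, local_rank=-1)
# init_gpu_params(args)  # n_gpu=0 takes the CPU-only early-return branch above
# set_seed(args)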
| 7 | 0 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 716 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return value
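# Usage examples (environment variable names are hypothetical):
# get_int_from_env(["LOCAL_WORLD_SIZE", "WORLD_SIZE"], 1)  # first non-negative int found, else 1
# parse_flag_from_env("DEBUG_MODE", default=False)         # True iff the variable holds a truthy string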
| 7 | 0 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    """simple docstring"""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative')
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = [''.join(row) for row in temp_grid]
    output_string = ''.join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """simple docstring"""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative')
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append('*')
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ''  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """simple docstring"""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
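# Worked examples:
# encrypt("Hello World", 4)    -> "HWe olordll"
# decrypt("HWe olordll", 4)    -> "Hello World"
# bruteforce("HWe olordll")[4] -> "Hello World"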
| 717 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 2_5_5
        label = label - 1
        label[label == 2_5_4] = 2_5_5
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """simple docstring"""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['overall_accuracy'] = all_acc
    metrics['mean_iou'] = np.nanmean(iou)
    metrics['mean_accuracy'] = np.nanmean(acc)
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels)
        return iou_result
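# Sanity check mirroring the docstring example above:
# mean_iou(results=predicted, gt_seg_maps=ground_truth, num_labels=10,
#          ignore_index=255, reduce_labels=False)["mean_iou"] -> 0.47750000000000004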
| 7 | 0 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Tuple = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Dict = 'tokenizer_file'
SCREAMING_SNAKE_CASE_ : List[str] = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase__ ( self ) -> List[Any]:
super().setUp()
lowercase_ : int = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self , **_lowercase ) -> Any:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Optional[int] = self.get_rust_tokenizer()
lowercase_ : Any = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
lowercase_ : Dict = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
lowercase_ : List[Any] = tokenizer.batch_encode_plus(_lowercase )['input_ids']
self.assertListEqual(_lowercase , _lowercase )
lowercase_ : Optional[Any] = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=6 ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase_ : int = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase_ : Dict = 'This is a simple input'
lowercase_ : List[str] = ['This is a simple input 1', 'This is a simple input 2']
lowercase_ : List[Any] = ('This is a simple input', 'This is a pair')
lowercase_ : List[str] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(_lowercase , max_length=_lowercase )
tokenizer_r.encode_plus(_lowercase , max_length=_lowercase )
tokenizer_r.batch_encode_plus(_lowercase , max_length=_lowercase )
tokenizer_r.encode(_lowercase , max_length=_lowercase )
tokenizer_r.batch_encode_plus(_lowercase , max_length=_lowercase )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='max_length' )
# Simple input
self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='max_length' )
# Simple input
self.assertRaises(
_lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='max_length' , )
# Pair input
self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='max_length' )
# Pair input
self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='max_length' )
# Pair input
self.assertRaises(
_lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='max_length' , )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : List[str] = self.get_rust_tokenizer()
lowercase_ : Dict = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_lowercase )
lowercase_ : Optional[Any] = next(iter(_lowercase ) )['premise'] # pick up one data
lowercase_ : Any = list(sample_data.values() )
lowercase_ : int = list(map(tokenizer.encode , _lowercase ) )
lowercase_ : Optional[Any] = [tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase ) for x in output_tokens]
self.assertListEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> int:
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
        # any sequence length constraint. The parent class's version of this test would fail since it relies
        # on the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit'
def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=16 , **_lowercase , ) -> List[str]:
super().__init__(**_lowercase )
lowercase_ : Optional[int] = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Any = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Any = image_size
lowercase_ : Tuple = patch_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : str = qkv_bias
lowercase_ : List[str] = encoder_stride
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' )
@property
def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__ ( self ) -> float:
return 1E-4
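# The ONNX export therefore expects NCHW float inputs, e.g. a dummy batch of shape
# (1, num_channels=3, height=224, width=224), validated against the 1e-4 tolerance above.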
| 7 | 0 |
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
1_0: "a",
1_1: "b",
1_2: "c",
1_3: "d",
1_4: "e",
1_5: "f",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """simple docstring"""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 1_6)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
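# Worked examples:
# decimal_to_hexadecimal(5)    -> "0x5"
# decimal_to_hexadecimal(-256) -> "-0x100"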
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Any = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowercase=5_0257 , _lowercase=1024 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=None , _lowercase="gelu_pytorch_tanh" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=5_0256 , _lowercase=5_0256 , _lowercase=True , _lowercase=True , _lowercase=True , **_lowercase , ) -> Any:
lowercase_ : Tuple = vocab_size
lowercase_ : str = n_positions
lowercase_ : List[str] = n_embd
lowercase_ : str = n_layer
lowercase_ : Optional[Any] = n_head
lowercase_ : Optional[int] = n_inner
lowercase_ : Union[str, Any] = activation_function
lowercase_ : Dict = resid_pdrop
lowercase_ : str = embd_pdrop
lowercase_ : Optional[Any] = attn_pdrop
lowercase_ : List[Any] = layer_norm_epsilon
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[Any] = scale_attn_weights
lowercase_ : Any = use_cache
lowercase_ : List[str] = attention_softmax_in_fpaa
lowercase_ : Any = scale_attention_softmax_in_fpaa
lowercase_ : Optional[Any] = multi_query
lowercase_ : Optional[Any] = bos_token_id
lowercase_ : Optional[Any] = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
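# Note on the attribute_map above: reading config.hidden_size transparently returns
# config.n_embd (768 by default), so generic model code works unchanged.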
| 7 | 0 |
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """simple docstring"""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1_0_2_4) -> int:
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 720 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
| 7 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=64 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> str:
lowercase_ : str = parent
lowercase_ : Optional[int] = batch_size
lowercase_ : Tuple = seq_length
lowercase_ : str = is_training
lowercase_ : Tuple = use_input_mask
lowercase_ : Tuple = use_token_type_ids
lowercase_ : int = use_labels
lowercase_ : Tuple = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : Any = embedding_size
lowercase_ : Union[str, Any] = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Optional[Any] = intermediate_size
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : Optional[Any] = max_position_embeddings
lowercase_ : str = type_vocab_size
lowercase_ : int = type_sequence_label_size
lowercase_ : List[str] = initializer_range
lowercase_ : Union[str, Any] = num_labels
lowercase_ : List[str] = num_choices
lowercase_ : Dict = scope
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Union[str, Any] = None
if self.use_input_mask:
lowercase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : List[str] = None
if self.use_token_type_ids:
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Tuple = None
lowercase_ : Any = None
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ) -> List[str]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
lowercase_ : Union[str, Any] = MegatronBertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Tuple = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
lowercase_ : List[str] = model(_lowercase , token_type_ids=_lowercase )
lowercase_ : List[Any] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
lowercase_ : Optional[Any] = MegatronBertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
lowercase_ : Dict = MegatronBertForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : Optional[Any] = MegatronBertForNextSentencePrediction(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Dict = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
lowercase_ : Tuple = MegatronBertForPreTraining(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Union[str, Any] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , next_sentence_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
lowercase_ : Optional[Any] = MegatronBertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : List[str] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
lowercase_ : Dict = self.num_labels
lowercase_ : Optional[int] = MegatronBertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : str = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : str = self.num_labels
lowercase_ : List[str] = MegatronBertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Optional[int] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : Dict = self.num_choices
lowercase_ : Tuple = MegatronBertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Tuple = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
        lowercase_ : Tuple = config_and_inputs
lowercase_ : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : List[str] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Any = True
# test_resize_embeddings = False
SCREAMING_SNAKE_CASE_ : List[str] = False
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> Dict:
lowercase_ : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
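                # Models in the pretraining mapping need token-level `labels` plus a scalar
                # `next_sentence_label` per sequence, so zero tensors of those shapes are supplied.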
lowercase_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
lowercase_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Optional[Any] = MegatronBertModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def lowerCamelCase__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowercase )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowercase )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowercase )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowercase )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowercase )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowercase )
def _long_tensor( tok_lst ):
    """Build a tensor of token ids with dtype long on the test device."""
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
A: Any = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.' )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Optional[int] = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
lowercase_ : Optional[Any] = os.path.join(os.environ['MYDIR'] , _lowercase )
lowercase_ : List[str] = MegatronBertModel.from_pretrained(_lowercase )
model.to(_lowercase )
model.half()
lowercase_ : Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowercase_ : List[Any] = model(_lowercase )[0]
lowercase_ : int = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , _lowercase )
lowercase_ : Union[str, Any] = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3 ):
for jj in range(3 ):
lowercase_ : int = output[0, ii, jj]
lowercase_ : Any = expected[3 * ii + jj]
lowercase_ : int = 'ii={} jj={} a={} b={}'.format(_lowercase , _lowercase , _lowercase , _lowercase )
self.assertTrue(math.isclose(_lowercase , _lowercase , rel_tol=_lowercase , abs_tol=_lowercase ) , msg=_lowercase )
| 721 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def update_custom_js( version : str ) -> None:
    """Point `stableVersion` at the new release and add it to the version mapping."""
    with open(A , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = f"const stableVersion = \"v{version}\"\n"
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
    with open(A , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
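# Illustrative effect on custom.js when run with --version 4.21.0 (version number hypothetical):
#   const stableVersion = "v4.21.0"
#   const versionMapping = {
#       ...
#       "v4.21.0": "v4.21.0",
#   }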
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 7 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A: List[Any] = logging.get_logger(__name__)
A: Union[str, Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
A: Dict = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
A: Any = {
"ctrl": 2_5_6,
}
A: int = {
"Pregnancy": 1_6_8_6_2_9,
"Christianity": 7_6_7_5,
"Explain": 1_0_6_4_2_3,
"Fitness": 6_3_4_4_0,
"Saving": 6_3_1_6_3,
"Ask": 2_7_1_7_1,
"Ass": 9_5_9_8_5,
"Joke": 1_6_3_5_0_9,
"Questions": 4_5_6_2_2,
"Thoughts": 4_9_6_0_5,
"Retail": 5_2_3_4_2,
"Feminism": 1_6_4_3_3_8,
"Writing": 1_1_9_9_2,
"Atheism": 1_9_2_2_6_3,
"Netflix": 4_8_6_1_6,
"Computing": 3_9_6_3_9,
"Opinion": 4_3_2_1_3,
"Alone": 4_4_9_6_7,
"Funny": 5_8_9_1_7,
"Gaming": 4_0_3_5_8,
"Human": 4_0_8_8,
"India": 1_3_3_1,
"Joker": 7_7_1_3_8,
"Diet": 3_6_2_0_6,
"Legal": 1_1_8_5_9,
"Norman": 4_9_3_9,
"Tip": 7_2_6_8_9,
"Weight": 5_2_3_4_3,
"Movies": 4_6_2_7_3,
"Running": 2_3_4_2_5,
"Science": 2_0_9_0,
"Horror": 3_7_7_9_3,
"Confession": 6_0_5_7_2,
"Finance": 1_2_2_5_0,
"Politics": 1_6_3_6_0,
"Scary": 1_9_1_9_8_5,
"Support": 1_2_6_5_4,
"Technologies": 3_2_5_1_6,
"Teenage": 6_6_1_6_0,
"Event": 3_2_7_6_9,
"Learned": 6_7_4_6_0,
"Notion": 1_8_2_7_7_0,
"Wikipedia": 3_7_5_8_3,
"Books": 6_6_6_5,
"Extract": 7_6_0_5_0,
"Confessions": 1_0_2_7_0_1,
"Conspiracy": 7_5_9_3_2,
"Links": 6_3_6_7_4,
"Narcissus": 1_5_0_4_2_5,
"Relationship": 5_4_7_6_6,
"Relationships": 1_3_4_7_9_6,
"Reviews": 4_1_6_7_1,
"News": 4_2_5_6,
"Translation": 2_6_8_2_0,
"multilingual": 1_2_8_4_0_6,
}
def get_pairs( word ):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
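# Example: get_pairs(("l", "o", "w", "e", "r</w>"))
#   -> {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}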
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CONTROL_CODES
def __init__( self , _lowercase , _lowercase , _lowercase="<unk>" , **_lowercase ) -> Any:
super().__init__(unk_token=_lowercase , **_lowercase )
with open(_lowercase , encoding='utf-8' ) as vocab_handle:
lowercase_ : List[Any] = json.load(_lowercase )
lowercase_ : int = {v: k for k, v in self.encoder.items()}
with open(_lowercase , encoding='utf-8' ) as merges_handle:
lowercase_ : List[Any] = merges_handle.read().split('\n' )[1:-1]
lowercase_ : List[Any] = [tuple(merge.split() ) for merge in merges]
lowercase_ : List[str] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
lowercase_ : Union[str, Any] = {}
@property
def lowerCamelCase__ ( self ) -> str:
return len(self.encoder )
def lowerCamelCase__ ( self ) -> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self , _lowercase ) -> str:
if token in self.cache:
return self.cache[token]
lowercase_ : List[Any] = tuple(_lowercase )
lowercase_ : str = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
lowercase_ : Union[str, Any] = get_pairs(_lowercase )
if not pairs:
return token
while True:
lowercase_ : Tuple = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ : Optional[Any] = bigram
lowercase_ : Dict = []
lowercase_ : Tuple = 0
while i < len(_lowercase ):
try:
lowercase_ : List[Any] = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ : List[Any] = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ : List[str] = tuple(_lowercase )
lowercase_ : Tuple = new_word
if len(_lowercase ) == 1:
break
else:
lowercase_ : Optional[Any] = get_pairs(_lowercase )
lowercase_ : Union[str, Any] = '@@ '.join(_lowercase )
lowercase_ : int = word[:-4]
lowercase_ : Any = word
return word
def lowerCamelCase__ ( self , _lowercase ) -> List[str]:
lowercase_ : Optional[Any] = []
lowercase_ : Optional[int] = re.findall(r'\S+\n?' , _lowercase )
for token in words:
split_tokens.extend(list(self.bpe(_lowercase ).split(' ' ) ) )
return split_tokens
def lowerCamelCase__ ( self , _lowercase ) -> Dict:
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
return self.decoder.get(_lowercase , self.unk_token )
def lowerCamelCase__ ( self , _lowercase ) -> Union[str, Any]:
lowercase_ : Union[str, Any] = ' '.join(_lowercase ).replace('@@ ' , '' ).strip()
return out_string
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase_ : Optional[Any] = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : Optional[Any] = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + '\n' )
lowercase_ : Optional[Any] = 0
with open(_lowercase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
lowercase_ : Optional[Any] = token_index
writer.write(' '.join(_lowercase ) + '\n' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 700 |
'''Weighted (percentual proximity) scoring: normalize each attribute column to [0, 1] and sum the weighted scores per row.'''
def get_data( source_data : list[list[float]] ) -> list[list[float]]:
    """Transpose rows of `source_data` into per-attribute columns of floats."""
    data_lists : list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score( data_lists : list[list[float]] , weights : list[int] ) -> list[list[float]]:
    """Min-max normalize every column; weight 0 inverts the score so lower raw values rank higher."""
    score_lists : list[list[float]] = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score : list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
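# Worked example: dlist = [10, 20, 30] gives mind=10, maxd=30, so
#   weight 1 -> [0.0, 0.5, 1.0] (higher raw values score higher)
#   weight 0 -> [1.0, 0.5, 0.0] (lower raw values score higher)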
def generate_final_scores( score_lists : list[list[float]] ) -> list[float]:
    """Sum the per-attribute scores for each row."""
    final_scores : list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity( source_data : list[list[float]] , weights : list[int] ) -> list[list[float]]:
    """Score every row of `source_data` (weights: 1 = higher is better, 0 = lower is better) and append the score to the row."""
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
| 7 | 0 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowercase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(_lowercase , 'num_heads' ) )
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=64 , _lowercase=3 , _lowercase=[16, 48, 96] , _lowercase=[1, 3, 6] , _lowercase=[1, 2, 10] , _lowercase=[7, 3, 3] , _lowercase=[4, 2, 2] , _lowercase=[2, 1, 1] , _lowercase=[2, 2, 2] , _lowercase=[False, False, True] , _lowercase=[0.0, 0.0, 0.0] , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=True , _lowercase=True , _lowercase=2 , ) -> Dict:
lowercase_ : Any = parent
lowercase_ : Tuple = batch_size
lowercase_ : List[Any] = image_size
lowercase_ : Tuple = patch_sizes
lowercase_ : int = patch_stride
lowercase_ : Optional[Any] = patch_padding
lowercase_ : Optional[int] = is_training
lowercase_ : List[Any] = use_labels
lowercase_ : List[str] = num_labels
lowercase_ : Optional[int] = num_channels
lowercase_ : Any = embed_dim
lowercase_ : Optional[Any] = num_heads
lowercase_ : str = stride_kv
lowercase_ : List[Any] = depth
lowercase_ : int = cls_token
lowercase_ : Optional[Any] = attention_drop_rate
lowercase_ : Optional[int] = initializer_range
lowercase_ : int = layer_norm_eps
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : str = None
if self.use_labels:
lowercase_ : str = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Any = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self ) -> Optional[int]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
lowercase_ : Optional[Any] = CvtModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Dict = model(_lowercase )
lowercase_ : Optional[Any] = (self.image_size, self.image_size)
lowercase_ : Dict = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
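            # Standard conv output arithmetic per stage: out = floor((in + 2 * pad - kernel) / stride) + 1.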
lowercase_ : str = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
lowercase_ : List[str] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]:
lowercase_ : List[Any] = self.num_labels
lowercase_ : Union[str, Any] = CvtForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : List[str] = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ : Dict = config_and_inputs
lowercase_ : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Dict = False
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : List[Any] = CvtModelTester(self )
lowercase_ : List[Any] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def lowerCamelCase__ ( self ) -> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self ) -> Union[str, Any]:
return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCamelCase__ ( self ) -> List[Any]:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(_lowercase )
lowercase_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : int = [*signature.parameters.keys()]
lowercase_ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
lowercase_ : int = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
lowercase_ : List[str] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
lowercase_ : Any = outputs.hidden_states
lowercase_ : str = len(self.model_tester.depth )
self.assertEqual(len(_lowercase ) , _lowercase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Any = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__ ( self ) -> Optional[Any]:
pass
@slow
def lowerCamelCase__ ( self ) -> Union[str, Any]:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Optional[int] = CvtModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def _UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowercase )
lowercase_ : Tuple = self.default_image_processor
lowercase_ : Any = prepare_img()
lowercase_ : Union[str, Any] = image_processor(images=_lowercase , return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**_lowercase )
# verify the logits
lowercase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowercase )
lowercase_ : int = torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
| 701 |
'''simple docstring'''
def add( first : int , second : int ) -> int:
    """Add two non-negative integers using only bitwise operations."""
    while second != 0:
        carry = first & second  # positions where both operands have a 1 bit
        first ^= second         # bitwise sum, ignoring the carries
        second = carry << 1     # carries move one position to the left
    return first
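# Worked example: add(5, 3)
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000 -> carry=0b000, first=0b1000, second=0 -> returns 8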
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A: Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = XLMProphetNetTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = True
def lowerCamelCase__ ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : List[Any] = XLMProphetNetTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Dict = '[PAD]'
lowercase_ : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '[PAD]' )
self.assertEqual(vocab_keys[1] , '[CLS]' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_lowercase ) , 1012 )
def lowerCamelCase__ ( self ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : List[str] = XLMProphetNetTokenizer(_lowercase , keep_accents=_lowercase )
lowercase_ : str = tokenizer.tokenize('This is a test' )
self.assertListEqual(_lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
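        # `fairseq_offset` shifts raw SentencePiece piece ids so the special tokens can
        # occupy the first vocab slots, hence the offset added to each expected id above.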
lowercase_ : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase_ : int = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def lowerCamelCase__ ( self ) -> Any:
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : List[Any] = 'Hello World!'
lowercase_ : Any = [3_5389, 6672, 49, 2]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def lowerCamelCase__ ( self ) -> Any:
# fmt: off
lowercase_ : int = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 702 |
'''simple docstring'''
class CircularQueue :
    """Fixed-capacity circular (ring buffer) queue."""
    def __init__( self , n ) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0   # index where the next element will be written
        self.size = 0
    def __len__( self ) -> int:
        return self.size
    def is_empty( self ) -> bool:
        return self.size == 0
    def first( self ):
        return False if self.is_empty() else self.array[self.front]
    def enqueue( self , data ):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue( self ):
        if self.size == 0:
            raise Exception('UNDERFLOW' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
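# Example usage (illustrative):
#   q = CircularQueue(3)
#   q.enqueue(1).enqueue(2)  # enqueue returns self, so calls chain
#   len(q)       # -> 2
#   q.dequeue()  # -> 1
#   q.first()    # -> 2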
| 7 | 0 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
A: int = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _readaa( bytestream ):
    """Read a big-endian uint32 from a byte stream."""
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('>' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_images( f ):
    """Extract MNIST images into a 4-D uint8 numpy array [index, y, x, depth]."""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2_0_5_1:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
        num_images = _readaa(bytestream )
        rows = _readaa(bytestream )
        cols = _readaa(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , 'Please use tf.one_hot on tensors.' )
def _dense_to_one_hot( labels_dense , num_classes ):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
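# Example: labels [1, 0] with num_classes=3 -> [[0., 1., 0.], [1., 0., 0.]]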
@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_labels( f , one_hot=False , num_classes=1_0 ):
    """Extract MNIST labels into a 1-D uint8 numpy array (optionally one-hot)."""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2_0_4_9:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
        num_items = _readaa(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet :
    """In-memory MNIST data set with epoch-aware mini-batching."""
    @deprecated(
        None , 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.' , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ) -> None:
        seeda , seedb = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
        if fake_data:
            self._num_examples = 1_0000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images( self ):
        return self._images
    @property
    def labels( self ):
        return self._labels
    @property
    def num_examples( self ):
        return self._num_examples
    @property
    def epochs_completed( self ):
        return self._epochs_completed
    def next_batch( self , batch_size , fake_data=False , shuffle=True ):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples )
            numpy.random.shuffle(perma )
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , 'Please write your own downloading logic.' )
def _maybe_download( filename , work_directory , source_url ):
    """Download `filename` from `source_url` into `work_directory` unless it is already there."""
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
    with gfile.GFile(filepath ) as f:
        size = f.size()
    print('Successfully downloaded' , filename , size , 'bytes.' )
    return filepath
@deprecated(
    None , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def read_data_sets( train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5_0_0_0 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    """Download (if needed) and load the MNIST train/validation/test splits."""
    if fake_data:
        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , 'rb' ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , 'rb' ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , 'rb' ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , 'rb' ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            'Validation size should be between 0 and '
            f"{len(train_images )}. Received: {validation_size}."
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
| 703 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width( height , width , scale_factor=8 ):
    """Map pixel dimensions to latent dimensions, rounding up to a multiple of `scale_factor`."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
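# Example: downscale_height_and_width(768, 768, scale_factor=8) -> (96, 96),
# i.e. the pixel canvas maps to the latent resolution, rounded up to a multiple of scale_factor.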
def prepare_image( pil_image , w=5_1_2 , h=5_1_2 ):
    """Resize a PIL image and convert it to a [-1, 1] CHW float tensor with a batch dimension."""
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('RGB' ) )
    arr = arr.astype(np.float32 ) / 1_27.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
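# The classifier-free-guidance arithmetic used in the loop above, isolated as a
# runnable sketch (pure torch; the shapes and guidance scale are illustrative
# assumptions, not values taken from the pipeline):
import torch

guidance_scale = 4.0
noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, text] stacked on the batch dim
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)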
| 7 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: Tuple = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 'altclip_text_model'
def __init__( self , _lowercase=25_0002 , _lowercase=1024 , _lowercase=24 , _lowercase=16 , _lowercase=4096 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=514 , _lowercase=1 , _lowercase=0.02 , _lowercase=0.02 , _lowercase=1E-0_5 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase="absolute" , _lowercase=True , _lowercase=768 , **_lowercase , ) -> str:
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
lowercase_ : Tuple = vocab_size
lowercase_ : str = hidden_size
lowercase_ : int = num_hidden_layers
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : List[str] = hidden_act
lowercase_ : Union[str, Any] = intermediate_size
lowercase_ : Union[str, Any] = hidden_dropout_prob
lowercase_ : Optional[Any] = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : Union[str, Any] = type_vocab_size
lowercase_ : Any = initializer_range
lowercase_ : str = initializer_factor
lowercase_ : Tuple = layer_norm_eps
lowercase_ : Union[str, Any] = position_embedding_type
lowercase_ : int = use_cache
lowercase_ : List[Any] = project_dim
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = 'altclip_vision_model'
def __init__( self , _lowercase=768 , _lowercase=3072 , _lowercase=512 , _lowercase=12 , _lowercase=12 , _lowercase=3 , _lowercase=224 , _lowercase=32 , _lowercase="quick_gelu" , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1.0 , **_lowercase , ) -> Optional[int]:
super().__init__(**_lowercase )
lowercase_ : List[str] = hidden_size
lowercase_ : Any = intermediate_size
lowercase_ : int = projection_dim
lowercase_ : Union[str, Any] = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Any = num_channels
lowercase_ : Optional[Any] = patch_size
lowercase_ : Optional[Any] = image_size
lowercase_ : Tuple = initializer_range
lowercase_ : List[str] = initializer_factor
lowercase_ : Any = attention_dropout
lowercase_ : str = layer_norm_eps
lowercase_ : Tuple = hidden_act
@classmethod
def lowerCamelCase__ ( cls , _lowercase , **_lowercase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowercase )
lowercase_ : int = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('model_type' ) == "altclip":
lowercase_ : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowercase , **_lowercase )
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 'altclip'
SCREAMING_SNAKE_CASE_ : Any = True
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=768 , _lowercase=2.65_92 , **_lowercase ) -> int:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
lowercase_ : List[str] = kwargs.pop('text_config_dict' , _lowercase )
lowercase_ : Optional[Any] = kwargs.pop('vision_config_dict' , _lowercase )
super().__init__(**_lowercase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
lowercase_ : List[str] = {}
# This is the complete result when using `text_config_dict`.
lowercase_ : str = AltCLIPTextConfig(**_lowercase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
lowercase_ : int = (
f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
f"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
lowercase_ : List[str] = (
f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
f"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(_lowercase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
lowercase_ : Dict = {}
# This is the complete result when using `vision_config_dict`.
lowercase_ : Optional[Any] = AltCLIPVisionConfig(**_lowercase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
lowercase_ : List[str] = {
str(_lowercase ): value for key, value in _vision_config_dict['id2label'].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
lowercase_ : Union[str, Any] = (
f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
f"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
lowercase_ : Any = (
f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
f"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(_lowercase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
lowercase_ : List[Any] = {}
logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
if vision_config is None:
lowercase_ : Optional[Any] = {}
logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )
lowercase_ : int = AltCLIPTextConfig(**_lowercase )
lowercase_ : Any = AltCLIPVisionConfig(**_lowercase )
lowercase_ : Union[str, Any] = projection_dim
lowercase_ : List[Any] = logit_scale_init_value
lowercase_ : str = 1.0
@classmethod
def lowerCamelCase__ ( cls , _lowercase , _lowercase , **_lowercase ) -> int:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowercase )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : List[Any] = copy.deepcopy(self.__dict__ )
lowercase_ : Union[str, Any] = self.text_config.to_dict()
lowercase_ : int = self.vision_config.to_dict()
lowercase_ : Optional[int] = self.__class__.model_type
return output
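# The nested-config composition above, reduced to a runnable miniature
# (self-contained stand-ins; the class names in this dump are mangled, but the
# bodies refer to AltCLIPTextConfig / AltCLIPVisionConfig / AltCLIPConfig):
class _TextConfigSketch:
    def __init__(self, hidden_size=1024):
        self.hidden_size = hidden_size

class _JointConfigSketch:
    def __init__(self, text_config=None, projection_dim=768):
        # sub-config dicts are re-materialized as objects, as the joint config above does
        self.text_config = _TextConfigSketch(**(text_config or {}))
        self.projection_dim = projection_dim

cfg = _JointConfigSketch(text_config={"hidden_size": 512})
assert cfg.text_config.hidden_size == 512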
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A: int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
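# The _LazyModule idea in a few lines via PEP 562 module __getattr__ (a sketch
# meant to live in a package __init__.py; transformers uses a class-based
# variant of the same trick, so this is an illustration, not its implementation):
import importlib

_lazy_structure = {"json": ["dumps", "loads"]}
_lookup = {sym: mod for mod, syms in _lazy_structure.items() for sym in syms}

def __getattr__(name):
    if name in _lookup:
        # the heavy import only happens on first attribute access
        return getattr(importlib.import_module(_lookup[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")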
| 7 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A: int = logging.getLogger(__name__)
@dataclass
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[float] = field(
default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
SCREAMING_SNAKE_CASE_ : bool = field(default=UpperCAmelCase_, metadata={'help': 'Whether to SortishSamler or not.'} )
SCREAMING_SNAKE_CASE_ : bool = field(
default=UpperCAmelCase_, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
SCREAMING_SNAKE_CASE_ : bool = field(default=UpperCAmelCase_, metadata={'help': 'whether to use adafactor'} )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(
default=UpperCAmelCase_, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(
default=UpperCAmelCase_, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(default=UpperCAmelCase_, metadata={'help': 'Dropout probability. Goes into model.config.'} )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(
default=UpperCAmelCase_, metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default='linear', metadata={'help': f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""}, )
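# The `field(..., metadata={"help": ...})` hook above is plain dataclasses
# machinery; a minimal self-contained illustration:
from dataclasses import dataclass, field, fields

@dataclass
class _ArgsSketch:
    dropout: float = field(default=0.1, metadata={"help": "Dropout probability."})

print({f.name: f.metadata["help"] for f in fields(_ArgsSketch)})  # {'dropout': 'Dropout probability.'}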
| 705 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase ) -> Tuple:
return FSMTTokenizer.from_pretrained(_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
lowercase_ : str = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(_lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
lowercase_ : Optional[Any] = f"facebook/wmt19-{pair}"
lowercase_ : str = self.get_tokenizer(_lowercase )
lowercase_ : Any = self.get_model(_lowercase )
lowercase_ : Any = bleu_data[pair]['src']
lowercase_ : Any = bleu_data[pair]['tgt']
lowercase_ : Dict = tokenizer(_lowercase , return_tensors='pt' , truncation=_lowercase , padding='longest' ).to(_lowercase )
lowercase_ : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowercase_ : Any = tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
lowercase_ : Union[str, Any] = calculate_bleu(_lowercase , _lowercase )
print(_lowercase )
self.assertGreaterEqual(scores['bleu'] , _lowercase )
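# `calculate_bleu` is imported from the examples' local utils; a near-equivalent,
# self-contained version built on sacrebleu (assumption: sacrebleu is installed,
# which is the library the utils helper wraps):
from sacrebleu import corpus_bleu

def calculate_bleu_sketch(output_lns, refs_lns):
    return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}

# calculate_bleu_sketch(["the cat sat on the mat"], ["the cat sat on the mat"]) -> {"bleu": 100.0}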
| 7 | 0 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
A: Tuple = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
A: Any = []
A: Dict = []
A: Optional[Any] = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
A: str = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
"emoji": True,
},
}
]
A: Optional[int] = 0
for log in Path().glob("*.log"):
A: Dict = 0
with open(log, "r") as f:
for line in f:
A: str = json.loads(line)
if line.get("nodeid", "") != "":
A: List[Any] = line["nodeid"]
if line.get("duration", None) is not None:
A: Dict = f"""{line['duration']:.4f}"""
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
A: Tuple = []
log.unlink()
A: Dict = ""
A: Tuple = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
A: Union[str, Any] = []
A: str = {}
for test in failed_tests:
A: List[str] = test[0].split("::")
A: Any = data[0].split("/")[-1]
if data[0] not in filesafailed:
A: List[str] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
A: List[str] = [test[0] for test in failed_table]
A: Optional[Any] = list(set(files))
# Count number of instances in failed_tests
A: Tuple = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
A: Optional[Any] = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
A: Tuple = "Too many failed tests, please see the full report in the Action results."
A: Optional[int] = len(err) + 1_0
A: str = message[: 3_0_0_0 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
A: List[Any] = "No failed tests! 🤗"
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
A: List[str] = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
A: List[Any] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
A: Optional[Any] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
A: List[str] = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
A: Dict = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
A: int = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
A: List[Any] = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
A: str = row[0]
else:
A: Any = ""
A: str = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
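# A quick, self-contained look at what the tabulate(...) calls above emit
# (the rows are made up; "pipe" is a stock format similar to the custom one):
from tabulate import tabulate

rows = [["tests/test_hooks.py", 2], ["tests/test_utils.py", 1]]
print(tabulate(rows, headers=["Test Location", "Num Failed"], tablefmt="pipe", stralign="right"))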
| 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: int = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=3 , _lowercase=32 , _lowercase=3 , _lowercase=10 , _lowercase=[8, 16, 32, 64] , _lowercase=[1, 1, 2, 1] , _lowercase=True , _lowercase=True , _lowercase="relu" , _lowercase=3 , _lowercase=None , _lowercase=["stage2", "stage3", "stage4"] , _lowercase=[2, 3, 4] , _lowercase=1 , ) -> str:
lowercase_ : List[Any] = parent
lowercase_ : str = batch_size
lowercase_ : Optional[int] = image_size
lowercase_ : Union[str, Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : List[Any] = hidden_sizes
lowercase_ : str = depths
lowercase_ : Optional[Any] = is_training
lowercase_ : Any = use_labels
lowercase_ : Tuple = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Union[str, Any] = scope
lowercase_ : Union[str, Any] = len(_lowercase )
lowercase_ : str = out_features
lowercase_ : Optional[Any] = out_indices
lowercase_ : List[Any] = num_groups
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[str] = None
if self.use_labels:
lowercase_ : Any = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Any = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self ) -> Any:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
lowercase_ : List[str] = BitModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Tuple = model(_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : Any = self.num_labels
lowercase_ : List[Any] = BitForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Optional[Any] = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
lowercase_ : int = BitBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : str = model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : int = None
lowercase_ : int = BitBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Optional[Any] = model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = self.prepare_config_and_inputs()
lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : str = False
def lowerCamelCase__ ( self ) -> str:
lowercase_ : List[Any] = BitModelTester(self )
lowercase_ : Optional[Any] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def lowerCamelCase__ ( self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self ) -> List[Any]:
return
@unittest.skip(reason='Bit does not output attentions' )
def lowerCamelCase__ ( self ) -> str:
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def lowerCamelCase__ ( self ) -> Any:
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def lowerCamelCase__ ( self ) -> Any:
pass
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(_lowercase )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[int] = model_class(config=_lowercase )
for name, module in model.named_modules():
if isinstance(_lowercase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def lowerCamelCase__ ( self ) -> str:
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
lowercase_ : int = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
lowercase_ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Dict = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Dict = layer_type
lowercase_ : Optional[int] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def lowerCamelCase__ ( self ) -> Tuple:
pass
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCamelCase__ ( self ) -> Dict:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Optional[Any] = BitModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def _UpperCAmelCase ( ) -> str:
"""simple docstring"""
lowercase_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self ) -> Any:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowercase )
lowercase_ : Optional[int] = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : List[Any] = image_processor(images=_lowercase , return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
lowercase_ : Union[str, Any] = model(**_lowercase )
# verify the logits
lowercase_ : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowercase )
lowercase_ : Dict = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
@require_torch
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = (BitBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = BitConfig
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : str = BitModelTester(self )
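# End-to-end sketch of the backbone path these tests exercise (assumes network
# access for from_pretrained; "google/bit-50" is the canonical BiT checkpoint,
# and the out_features names follow the stage naming used in the tester above):
import torch
from transformers import BitBackbone

backbone = BitBackbone.from_pretrained("google/bit-50", out_features=["stage2", "stage4"])
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    feature_maps = backbone(pixel_values).feature_maps  # one tensor per requested stage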
| 707 |
'''simple docstring'''
def _UpperCAmelCase ( input_string : str ) -> str:
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_input_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]
    # we will store the start and end of the previous furthest-ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_input_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
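# Worked example (runnable as-is): "abacab" is interleaved to "a|b|a|c|a|b";
# the widest palindromic window, with the '|' separators stripped, is "bacab".
assert _UpperCAmelCase("abacab") == "bacab"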
| 7 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A: List[Any] = logging.get_logger(__name__)
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , *_lowercase , **_lowercase ) -> None:
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
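# The same deprecation-shim pattern in miniature (self-contained sketch,
# not the transformers classes):
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)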
| 708 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
def lowerCamelCase__ ( self ) -> Optional[Any]:
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple:
self._get_dirs()
lowercase_ : str = self._strip_protocol(_lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
self._get_dirs()
lowercase_ : List[str] = PurePosixPath(path.strip('/' ) )
lowercase_ : List[str] = {}
for p, f in self.dir_cache.items():
lowercase_ : Tuple = PurePosixPath(p.strip('/' ) )
lowercase_ : Optional[int] = p.parent
if root == path:
lowercase_ : List[str] = f
lowercase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
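# Self-contained illustration of how the directory cache above is derived
# from flat repo file names (pure stdlib; the file names are made up):
from pathlib import PurePosixPath

filenames = ["data/train.csv", "data/val.csv", "README.md"]
dir_cache = {}
for name in filenames:
    dir_cache[name] = {"name": name, "size": None, "type": "file"}
    for d in list(PurePosixPath(name).parents)[:-1]:
        dir_cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}
print(sorted(dir_cache))  # ['README.md', 'data', 'data/train.csv', 'data/val.csv']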
| 7 | 0 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (KDPMaDiscreteScheduler,)
SCREAMING_SNAKE_CASE_ : Dict = 1_0
def lowerCamelCase__ ( self , **_lowercase ) -> List[Any]:
lowercase_ : str = {
'num_train_timesteps': 1100,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_lowercase )
return config
def lowerCamelCase__ ( self ) -> Dict:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def lowerCamelCase__ ( self ) -> int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowercase )
def lowerCamelCase__ ( self ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = self.scheduler_classes[0]
lowercase_ : int = self.get_scheduler_config(prediction_type='v_prediction' )
lowercase_ : Union[str, Any] = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
lowercase_ : Optional[int] = self.dummy_model()
lowercase_ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase_ : Union[str, Any] = sample.to(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ : Optional[int] = scheduler.scale_model_input(_lowercase , _lowercase )
lowercase_ : Tuple = model(_lowercase , _lowercase )
lowercase_ : Any = scheduler.step(_lowercase , _lowercase , _lowercase )
lowercase_ : str = output.prev_sample
lowercase_ : Optional[Any] = torch.sum(torch.abs(_lowercase ) )
lowercase_ : List[Any] = torch.mean(torch.abs(_lowercase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2
assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2
assert abs(result_mean.item() - 0.00_02 ) < 1E-3
def lowerCamelCase__ ( self ) -> Optional[Any]:
if torch_device == "mps":
return
lowercase_ : str = self.scheduler_classes[0]
lowercase_ : Tuple = self.get_scheduler_config()
lowercase_ : Tuple = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
lowercase_ : Optional[int] = self.dummy_model()
lowercase_ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase_ : str = sample.to(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ : Dict = scheduler.scale_model_input(_lowercase , _lowercase )
lowercase_ : Union[str, Any] = model(_lowercase , _lowercase )
lowercase_ : List[Any] = scheduler.step(_lowercase , _lowercase , _lowercase )
lowercase_ : List[Any] = output.prev_sample
lowercase_ : Union[str, Any] = torch.sum(torch.abs(_lowercase ) )
lowercase_ : str = torch.mean(torch.abs(_lowercase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
def lowerCamelCase__ ( self ) -> Dict:
if torch_device == "mps":
return
lowercase_ : str = self.scheduler_classes[0]
lowercase_ : Dict = self.get_scheduler_config()
lowercase_ : Optional[int] = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowercase )
lowercase_ : Dict = self.dummy_model()
lowercase_ : List[Any] = self.dummy_sample_deter.to(_lowercase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowercase_ : Tuple = scheduler.scale_model_input(_lowercase , _lowercase )
lowercase_ : Union[str, Any] = model(_lowercase , _lowercase )
lowercase_ : Dict = scheduler.step(_lowercase , _lowercase , _lowercase )
lowercase_ : Union[str, Any] = output.prev_sample
lowercase_ : Any = torch.sum(torch.abs(_lowercase ) )
lowercase_ : Optional[Any] = torch.mean(torch.abs(_lowercase ) )
if str(_lowercase ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
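# The denoising loop these tests exercise, in minimal runnable form (assumes
# diffusers is installed; the mangled `KDPMaDiscreteScheduler` name above appears
# to correspond to the upstream KDPM2DiscreteScheduler, and the "model" here is
# just random noise standing in for a real UNet):
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.randn_like(model_input)  # stand-in for a real UNet
    sample = scheduler.step(noise_pred, t, sample).prev_sample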
| 709 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : Optional[int] = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = in_proj_bias[: config.hidden_size]
lowercase_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(a , a )
def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = dct.pop(a )
lowercase_ : Dict = val
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
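# The read_in_q_k_v step above is just slicing a fused qkv projection into
# three equal blocks; a self-contained illustration with a toy hidden size:
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)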
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a: int = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Union[str, Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__a: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
'''simple docstring'''
def cocktail_shaker_sort ( unsorted : list ) -> list:
    """simple docstring"""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
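# Quick sanity check of the two bidirectional passes (runnable on import):
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]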
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 7 | 0 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
A: int = pytest.mark.integration
A: List[Any] = {"comet"}
A: Dict = importlib.util.find_spec("fairseq") is not None
A: Dict = {"code_eval"}
A: str = os.name == "nt"
A: Optional[int] = {"bertscore", "frugalscore", "perplexity"}
A: Optional[Any] = importlib.util.find_spec("transformers") is not None
def _UpperCAmelCase ( a : Tuple ) -> Tuple:
"""simple docstring"""
@wraps(a )
def wrapper(self : Optional[Any] , a : Any ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('"test requires Fairseq"' )
else:
test_case(self , a )
return wrapper
def _UpperCAmelCase ( a : int ) -> Dict:
"""simple docstring"""
@wraps(a )
def wrapper(self : Dict , a : List[Any] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('"test requires transformers"' )
else:
test_case(self , a )
return wrapper
def _UpperCAmelCase ( a : Any ) -> Tuple:
"""simple docstring"""
@wraps(a )
def wrapper(self : Optional[int] , a : str ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('"test not supported on Windows"' )
else:
test_case(self , a )
return wrapper
def _UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
lowercase_ : Union[str, Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
@local
class __magic_name__ ( parameterized.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
SCREAMING_SNAKE_CASE_ : Tuple = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
lowercase_ : str = '[...]'
lowercase_ : int = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , _lowercase ) ).module_path )
lowercase_ : int = datasets.load.import_main_class(metric_module.__name__ , dataset=_lowercase )
# check parameters
lowercase_ : str = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(_lowercase , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowercase_ : Optional[Any] = doctest.testmod(_lowercase , verbose=_lowercase , raise_on_error=_lowercase )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def lowerCamelCase__ ( self , _lowercase ) -> Any:
lowercase_ : List[str] = '[...]'
lowercase_ : Dict = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , _lowercase ) ).module_path )
# run doctest
with self.use_local_metrics():
lowercase_ : Optional[int] = doctest.testmod(_lowercase , verbose=_lowercase , raise_on_error=_lowercase )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    """simple docstring"""
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        """simple docstring"""

        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    """simple docstring"""
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    """simple docstring"""

    def load_from_checkpoint(model_path):
        class Model:
            """simple docstring"""

            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    """simple docstring"""
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
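# A minimal sketch of how one more patcher could be registered for another metric
# with heavy model calls. The metric name "my_metric" and the patched target
# "my_metric_package.expensive_forward" are hypothetical placeholders, so the
# example is left commented out:
#
# @LocalMetricTest.register_intensive_calls_patcher("my_metric")
# def patch_my_metric(module_name):
#     with patch("my_metric_package.expensive_forward") as mock_forward:
#         mock_forward.return_value = [0.5]
#         yield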
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 7 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """simple docstring"""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
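    # Quick hand-checked sanity test: the intersection {"c", "d", "e"} has 3
    # elements and the union has 8, so the value printed above is 3 / 8 = 0.375.
    assert jaccard_similarity(set_a, set_b) == 0.375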
| 712 |
'''simple docstring'''
def jaro_winkler(stra: str, strb: str) -> float:
    """simple docstring"""

    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(stra, strb)
    matching_2 = get_matched_characters(strb, stra)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_1, matching_2) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
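    # Two hand-verifiable edge cases: identical strings score 1.0 and fully
    # disjoint strings score 0.0.
    assert jaro_winkler("hello", "hello") == 1.0
    assert jaro_winkler("abc", "xyz") == 0.0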
| 7 | 0 |
class CircularQueue:
    """simple docstring"""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
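if __name__ == "__main__":
    # Minimal usage sketch for the class above (enqueue returns self, so calls
    # can be chained).
    queue = CircularQueue(3)
    queue.enqueue(10).enqueue(20)
    assert len(queue) == 2 and queue.first() == 10
    assert queue.dequeue() == 10 and queue.dequeue() == 20
    assert queue.is_empty()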
| 713 |
'''simple docstring'''
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    """simple docstring"""
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
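    # Composition sanity check: four successive 90° counterclockwise rotations
    # reproduce the original matrix.
    matrix = make_matrix()
    assert rotate_90(rotate_90(rotate_90(rotate_90(matrix)))) == matrix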
| 7 | 0 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    """simple docstring"""
    return math.pow(x, z - 1) * math.exp(-x)
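# Numerical spot check: Γ(n) = (n - 1)! for positive integers, so gamma(5)
# should integrate to roughly 24 (the tolerance is chosen loosely for quad).
if __name__ == "__main__":
    assert abs(gamma(5) - 24.0) < 1e-3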
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 714 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """simple docstring"""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    readme = lowercase_  # the model card text assembled above
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 715 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
def git_log(folder_path: str):
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """simple docstring"""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def set_seed(args):
    """simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
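if __name__ == "__main__":
    # Smoke test for the CPU-only branch, using a bare argparse.Namespace in
    # place of the real training params object (an assumption made only for
    # this sketch).
    from argparse import Namespace

    cpu_params = Namespace(n_gpu=0)
    init_gpu_params(cpu_params)
    assert cpu_params.is_master and not cpu_params.multi_gpu
    set_seed(Namespace(seed=42, n_gpu=0))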
| 7 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
A: str = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    """simple docstring"""

    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """simple docstring"""

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    """simple docstring"""

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 716 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return value
| 7 | 0 |
'''simple docstring'''
class FlowNetwork:
    """simple docstring"""

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    """simple docstring"""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """simple docstring"""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """simple docstring"""

    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
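    # For this network the only augmenting path is 0 -> 1 -> 2 -> 3 with
    # bottleneck capacity min(7, 6, 8) = 6, so the script should print 6.
    assert maximum_flow == 6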
| 717 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n"
_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """simple docstring"""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    """simple docstring"""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
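# Hand-checked example of intersect_and_union: pred [[0, 1], [1, 1]] against
# gt [[0, 1], [0, 1]] gives per-class intersection [1, 2] and union [2, 3],
# i.e. per-class IoU [0.5, 2/3].
if __name__ == "__main__":
    _pred = np.array([[0, 1], [1, 1]])
    _gt = np.array([[0, 1], [0, 1]])
    _inter, _union, _, _ = intersect_and_union(_pred, _gt, num_labels=2, ignore_index=255)
    assert _inter.tolist() == [1, 2] and _union.tolist() == [2, 3]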
| 7 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """simple docstring"""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """simple docstring"""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    """simple docstring"""

    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    """simple docstring"""

    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """simple docstring"""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 7 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 7 | 0 |
'''simple docstring'''
def perfect(number: int) -> bool:
    """simple docstring"""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
A: List[Any] = int(input("Enter number: ").strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
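    # Known small cases: 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are perfect,
    # while 12 is not (1 + 2 + 3 + 4 + 6 = 16).
    assert perfect(6) and perfect(28) and not perfect(12)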
| 720 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
"""simple docstring"""
    def test_all_is_compatible(self):
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
    def test_diffusers_model_is_compatible(self):
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
    def test_diffusers_model_is_not_compatible(self):
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
    def test_transformer_model_is_compatible(self):
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
    def test_transformer_model_is_not_compatible(self):
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
    def test_all_is_compatible_variant(self):
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_diffusers_model_is_compatible_variant(self):
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_diffusers_model_is_compatible_variant_partial(self):
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_all_is_not_compatible_variant(self):
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_transformer_model_is_compatible_variant(self):
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_transformer_model_is_compatible_variant_partial(self):
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_transformer_model_is_not_compatible_variant(self):
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
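    # The invariant these cases exercise: a checkpoint is safetensors-compatible
    # when every torch `.bin` weight file has a `.safetensors` sibling for the
    # same component (with the `.fp16` infix when a variant is requested).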
| 7 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 721 |
'''simple docstring'''
import argparse
JS_PATH = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """simple docstring"""
    with open(JS_PATH, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_PATH, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
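# Example invocation (added; the script path is hypothetical, adapt it to
# wherever this utility lives in the repository):
#
#   python utils/update_custom_js.py --version 4.31.0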
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary of Value features."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
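# Illustrative usage sketch (added; not part of the original module). With the
# surrounding `datasets` package installed, a fixed-language translation column
# is declared and populated like this:
#
#   from datasets import Dataset, Features
#   features = Features({"translation": Translation(languages=["en", "fr", "de"])})
#   ds = Dataset.from_dict(
#       {"translation": [{"en": "the cat", "fr": "le chat", "de": "die katze"}]},
#       features=features,
#   )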
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of source_data into one list per column."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each column; weight 0 minimizes, weight 1 maximizes."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one combined score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row of source_data by weighted min-max proximity and append the score."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
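
if __name__ == "__main__":
    # Small demonstration (added; not part of the original file): three rows of
    # (price, area) where price should be minimized (weight 0) and area
    # maximized (weight 1). Each row ends up carrying a combined score in [0, 2].
    vehicles = [[20, 60], [23, 90], [22, 50]]
    print(procentual_proximity(vehicles, [0, 1]))
    # -> [[20, 60, 1.25], [23, 90, 1.0], [22, 50, 0.33...]]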
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    """Placeholder raising a helpful error when optional backends are missing."""

    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operators."""
    while second != 0:
        carry = first & second  # positions that generate a carry
        first ^= second  # sum without carries
        second = carry << 1  # shift the carry into place
    return first
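
# Worked example (added): add(5, 3) -> 8
#   carry = 5 & 3 = 1,  first = 5 ^ 3 = 6,  second = 1 << 1 = 2
#   carry = 6 & 2 = 2,  first = 6 ^ 2 = 4,  second = 2 << 1 = 4
#   carry = 4 & 4 = 4,  first = 4 ^ 4 = 0,  second = 4 << 1 = 8
#   carry = 0 & 8 = 0,  first = 0 ^ 8 = 8,  second = 0       -> return 8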
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure (Pa) of `moles` of an ideal gas at `kelvin` K in `volume` m^3."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume (m^3) of `moles` of an ideal gas at `kelvin` K under `pressure` Pa."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
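
# Worked example (added): one mole of an ideal gas at 300 K in 0.0245 m^3
# exerts roughly atmospheric pressure:
#   pressure_of_gas_system(1.0, 300.0, 0.0245)
#   = 1 * 300 * 8.314462 / 0.0245 ~ 1.018e5 Pa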
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
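
if __name__ == "__main__":
    # Small demonstration (added; not part of the original file).
    queue = CircularQueue(3)
    queue.enqueue(10).enqueue(20).enqueue(30)  # enqueue returns self, so calls chain
    print(len(queue))       # 3
    print(queue.dequeue())  # 10
    print(queue.first())    # 20
    queue.enqueue(40)       # wraps around the fixed-size buffer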
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Fast MBart tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def lowerCamelCase__ ( self , _lowercase , _lowercase = "en_XX" , _lowercase = None , _lowercase = "ro_RO" , **_lowercase , ) -> BatchEncoding:
lowercase_ : Optional[int] = src_lang
lowercase_ : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self ) -> int:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
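
# Illustrative usage sketch (added; not part of the original file). The fast
# tokenizer prefixes nothing and appends `[eos, lang_code]`, so a translation
# batch is typically built like this (downloads the pretrained files):
#
#   tokenizer = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria",
#                     return_tensors="pt")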
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """Compute output dimensions aligned to the movq latent grid (multiples of scale_factor)."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
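
# Worked example (added): with the default scale_factor=8, dimensions are
# rounded up to a multiple of 64 and then divided by 8, e.g.
#   downscale_height_and_width(768, 768) -> (96, 96)   (768 / 64 = 12, * 8)
#   downscale_height_and_width(700, 700) -> (88, 88)   (700 // 64 = 10 with remainder, so 11 * 8)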
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a normalized torch tensor in [-1, 1]."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image pipeline for Kandinsky 2.2."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than 4 characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
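
# Behavior sketch (added): once the module is registered as a _LazyModule, the
# submodules listed in _import_structure are only imported on first attribute
# access, e.g.
#
#   from transformers.onnx import OnnxConfig  # triggers import of .config
#   from transformers.onnx import export      # triggers import of .convert
#
# so merely importing the package stays cheap.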
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
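
# Worked example (added): for n = 10 the sum of squares is 385 and the square
# of the sum is 55**2 = 3025, so solution(10) == 3025 - 385 == 2640.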
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
A: Optional[Any] = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
A: int = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
A: int = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` by the number of positions matching `main_target`."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Randomly replace one gene of `child` with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed children for `parent_1`, proportionally to its fitness score."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
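
# Worked example (added): a parent with normalized fitness 0.5 yields
# child_n = int(0.5 * 100) + 1 = 51, which is capped to 10, so `select`
# performs 10 crossovers and returns 20 mutated children for that parent.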
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward `target`; return (generation, total_population, best string)."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration for TrajectoryTransformer models."""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
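
# Illustrative usage sketch (added; not part of the original file):
#
#   config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=64)
#   assert config.hidden_size == 64  # resolved via the attribute_map alias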
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring using Manacher's algorithm."""
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
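
# Worked example (added): palindromic_string("abbbaba")
#   interleaved form: "a|b|b|b|a|b|a"
#   the longest palindromic substring found is "abbba".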
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = IFPipeline
SCREAMING_SNAKE_CASE_ : Any = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
SCREAMING_SNAKE_CASE_ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : Dict = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self):
        return self._get_dummy_components()
def snake_case__ ( self , _lowercase , _lowercase=0 ) -> Optional[int]:
if str(_lowercase ).startswith('mps' ):
lowercase_ : List[Any] = torch.manual_seed(_lowercase )
else:
lowercase_ : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
lowercase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case__ ( self ) -> str:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def snake_case__ ( self ) -> int:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case__ ( self ) -> Union[str, Any]:
self._test_save_load_local()
def snake_case__ ( self ) -> Tuple:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def snake_case__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ) -> int:
# if
lowercase_ : List[Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
lowercase_ : Dict = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=_lowercase , tokenizer=_lowercase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
lowercase_ : Union[str, Any] = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase_ : str = None
lowercase_ : Optional[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_lowercase , _lowercase , _lowercase , _lowercase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowercase_ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
lowercase_ : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_lowercase , _lowercase , _lowercase , _lowercase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase_ : int = IFInpaintingPipeline(**pipe_a.components )
lowercase_ : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_lowercase , _lowercase , _lowercase , _lowercase )
def snake_case__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
lowercase_ : int = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase_ : Any = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type='np' , )
lowercase_ : int = output.images[0]
assert image.shape == (64, 64, 3)
lowercase_ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowercase_ : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
lowercase_ : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase_ : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
lowercase_ : Any = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type='np' , )
lowercase_ : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
lowercase_ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_lowercase , _lowercase )
def snake_case__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
lowercase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
lowercase_ : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase_ : List[Any] = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type='np' , )
lowercase_ : Any = output.images[0]
assert image.shape == (64, 64, 3)
lowercase_ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase_ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
lowercase_ : Any = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase_ : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_lowercase )
lowercase_ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
lowercase_ : Any = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type='np' , )
lowercase_ : Dict = output.images[0]
assert image.shape == (256, 256, 3)
lowercase_ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase_ : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_lowercase , _lowercase )
def snake_case__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
lowercase_ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
lowercase_ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_lowercase )
lowercase_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase_ : List[str] = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type='np' , )
lowercase_ : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
lowercase_ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase_ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
lowercase_ : Any = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase_ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase )
lowercase_ : str = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_lowercase )
lowercase_ : Tuple = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_lowercase )
lowercase_ : str = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type='np' , )
lowercase_ : Any = output.images[0]
assert image.shape == (256, 256, 3)
lowercase_ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase_ : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_lowercase , _lowercase )
def _start_torch_memory_measurement():
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only fsspec filesystem over the files of a Hub dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
'''simple docstring'''
from manim import *
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = Rectangle(height=0.5 , width=0.5 )
lowercase_ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase_ : List[Any] = Rectangle(height=0.25 , width=0.25 )
lowercase_ : List[Any] = [mem.copy() for i in range(6 )]
lowercase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowercase_ : int = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
lowercase_ : Optional[int] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
lowercase_ : Any = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
lowercase_ : str = Text('CPU' , font_size=24 )
lowercase_ : Optional[Any] = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
lowercase_ : Optional[Any] = [mem.copy() for i in range(4 )]
lowercase_ : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
lowercase_ : List[Any] = Text('GPU' , font_size=24 )
lowercase_ : Tuple = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.move_to([-1, -1, 0] )
self.add(_lowercase )
lowercase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowercase_ : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
lowercase_ : Dict = Text('Model' , font_size=24 )
lowercase_ : List[str] = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.add(_lowercase )
lowercase_ : Optional[Any] = []
lowercase_ : Optional[int] = []
for i, rect in enumerate(_lowercase ):
lowercase_ : List[Any] = fill.copy().set_fill(_lowercase , opacity=0.8 )
target.move_to(_lowercase )
model_arr.append(_lowercase )
lowercase_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_lowercase )
self.add(*_lowercase , *_lowercase )
lowercase_ : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowercase_ : str = [meta_mem.copy() for i in range(6 )]
lowercase_ : int = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
lowercase_ : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
lowercase_ : Optional[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
lowercase_ : Union[str, Any] = Text('Disk' , font_size=24 )
lowercase_ : Union[str, Any] = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
disk.move_to([-4, -1.25, 0] )
self.add(_lowercase , _lowercase )
lowercase_ : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase_ : str = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowercase , _lowercase )
lowercase_ : Tuple = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowercase )
lowercase_ : Optional[int] = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase ) )
lowercase_ : Union[str, Any] = Square(0.3 )
input.set_fill(_lowercase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _lowercase , buff=0.5 )
self.play(Write(_lowercase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_lowercase , buff=0.02 )
self.play(MoveToTarget(_lowercase ) )
self.play(FadeOut(_lowercase ) )
lowercase_ : int = Arrow(start=_lowercase , end=_lowercase , color=_lowercase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _lowercase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowercase_ : Any = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=3 ) )
lowercase_ : str = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_lowercase ) , Circumscribe(model_arr[0] , color=_lowercase , **_lowercase ) , Circumscribe(model_cpu_arr[0] , color=_lowercase , **_lowercase ) , Circumscribe(gpu_rect[0] , color=_lowercase , **_lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowercase_ : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _lowercase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowercase_ : Any = AnimationGroup(
FadeOut(_lowercase , run_time=0.5 ) , MoveToTarget(_lowercase , run_time=0.5 ) , FadeIn(_lowercase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_lowercase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowercase_ : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **_lowercase ) , Circumscribe(cpu_left_col_base[i] , **_lowercase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_lowercase , **_lowercase ) , Circumscribe(gpu_rect[0] , color=_lowercase , **_lowercase ) , Circumscribe(model_arr[i + 1] , color=_lowercase , **_lowercase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_lowercase , **_lowercase ) , Circumscribe(cpu_left_col_base[-1] , color=_lowercase , **_lowercase ) , Circumscribe(gpu_rect[0] , color=_lowercase , **_lowercase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowercase_ : Any = a_c
lowercase_ : Any = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_lowercase ) , FadeOut(_lowercase , run_time=0.5 ) , )
lowercase_ : Dict = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=3 ) , MoveToTarget(_lowercase ) )
self.wait()
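# Illustrative sketch of the offload mechanism this scene animates (an
# assumption-level simplification, not accelerate's real hook API): each
# layer's weights are moved to the GPU just before its forward pass and back
# to the CPU right after, which is what the Circumscribe/MoveToTarget steps
# above visualize.
#
#   def pre_forward(module, *args):
#       module.to("cuda")    # weights CPU -> GPU just in time
#
#   def post_forward(module, *args):
#       module.to("cpu")     # ...and back once the layer has run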
| 709 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : Optional[int] = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = in_proj_bias[: config.hidden_size]
lowercase_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
        state_dict.pop(k , None )
def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = dct.pop(a )
lowercase_ : Dict = val
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
            state_dict.pop(k , None )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
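# Hedged usage sketch (the script filename below is an assumption):
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-converted
#
# The converted folder can then be reloaded with the standard API:
#
#   from transformers import ViltProcessor, ViltForMaskedLM
#   processor = ViltProcessor.from_pretrained("./vilt-converted")
#   model = ViltForMaskedLM.from_pretrained("./vilt-converted")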
| 7 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a: Tuple = logging.get_logger(__name__)
__a: List[Any] = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = 'vit_mae'
def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=16 , _lowercase=512 , _lowercase=8 , _lowercase=2048 , _lowercase=0.75 , _lowercase=False , **_lowercase , ) -> Tuple:
super().__init__(**_lowercase )
lowercase_ : str = hidden_size
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : Dict = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : int = hidden_dropout_prob
lowercase_ : int = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : Union[str, Any] = layer_norm_eps
lowercase_ : Optional[int] = image_size
lowercase_ : str = patch_size
lowercase_ : Dict = num_channels
lowercase_ : int = qkv_bias
lowercase_ : Optional[Any] = decoder_num_attention_heads
lowercase_ : Any = decoder_hidden_size
lowercase_ : Optional[int] = decoder_num_hidden_layers
lowercase_ : Optional[int] = decoder_intermediate_size
lowercase_ : Optional[Any] = mask_ratio
lowercase_ : Optional[int] = norm_pix_loss
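# Minimal usage sketch (not part of the original file; class names follow the
# upstream transformers API):
#
#   from transformers import ViTMAEConfig, ViTMAEModel
#   config = ViTMAEConfig(mask_ratio=0.6)   # override the default 0.75
#   model = ViTMAEModel(config)             # randomly initialized encoder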
| 710 |
'''simple docstring'''
def _UpperCAmelCase ( a : list ) -> list:
"""simple docstring"""
for i in range(len(a ) - 1 , 0 , -1 ):
lowercase_ : Any = False
        for j in range(i , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase_ , lowercase_ : Any = unsorted[j - 1], unsorted[j]
lowercase_ : int = True
        for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
lowercase_ , lowercase_ : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowercase_ : Optional[Any] = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
A: Tuple = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A: Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Tuple = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[int] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A: List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
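# Minimal sketch of the lazy-import pattern used above (an illustration of the
# idea, not the real transformers._LazyModule): a symbol is only imported from
# its submodule the first time it is looked up, which keeps the top-level
# `import transformers` cheap.
#
#   import importlib
#   import types
#
#   class MiniLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, symbols in self._import_structure.items():
#               if attr in symbols:
#                   module = importlib.import_module(f".{submodule}", self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(attr)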
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ['transformers', 'torch', 'note_seq']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
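# How the dummy-object pattern behaves (illustrative): because note_seq (and
# possibly torch/transformers) may be missing, importing this placeholder is
# always safe, but instantiating it or calling its classmethods raises an
# ImportError naming the missing backends, e.g.:
#
#   MidiProcessor()   # ImportError: ... requires the note_seq library ...
#
# (MidiProcessor is a hypothetical example name; the obfuscated class above
# stands in for whichever pipeline object needs these three backends.)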
| 7 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A: Tuple = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = ["ConvNextFeatureExtractor"]
A: Dict = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: List[str] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
A: Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 712 |
'''simple docstring'''
def _UpperCAmelCase ( a : str , a : str ) -> float:
"""simple docstring"""
def get_matched_characters(a : str , a : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
lowercase_ : Optional[int] = int(max(0 , i - limit ) )
lowercase_ : Union[str, Any] = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
                matched.append(l )
                lowercase_ : Union[str, Any] = f"{_stra[0:_stra.index(l )]} {_stra[_stra.index(l ) + 1:]}"
return "".join(a )
# matching characters
lowercase_ : Union[str, Any] = get_matched_characters(a , a )
lowercase_ : Optional[Any] = get_matched_characters(a , a )
lowercase_ : Optional[int] = len(a )
# transposition
lowercase_ : Dict = (
len([(ca, ca) for ca, ca in zip(a , a ) if ca != ca] ) // 2
)
if not match_count:
lowercase_ : List[str] = 0.0
else:
lowercase_ : Any = (
1
/ 3
* (
match_count / len(a )
+ match_count / len(a )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowercase_ : Optional[Any] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 7 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: str = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
A: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( a : int = 4 ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : Tuple = abs(a ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(transpose(a ) )
# OR.. transpose(reverse_column(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(reverse_column(a ) )
# OR.. reverse_column(reverse_row(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_column(transpose(a ) )
# OR.. transpose(reverse_row(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
    lowercase_ : Any = [list(x ) for x in zip(*a )]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : List[str] = matrix[::-1]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : str = [x[::-1] for x in matrix]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> None:
"""simple docstring"""
for i in matrix:
print(*a )
if __name__ == "__main__":
A: Dict = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_aa(matrix))
A: List[Any] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_aaa(matrix))
A: List[str] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_aaa(matrix))
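# Worked example (sketch) for a 2x2 matrix: rotating 90 degrees
# counterclockwise is a transpose followed by a row reversal.
#
#   [[1, 2],   transpose   [[1, 3],   reverse_row   [[2, 4],
#    [3, 4]]      ->        [2, 4]]       ->         [1, 3]]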
| 7 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A: List[Any] = logging.get_logger(__name__)
A: Optional[int] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
A: int = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
A: List[str] = {"facebook/blenderbot-3B": 1_2_8}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE_ : Optional[Any] = BlenderbotTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="replace" , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=False , _lowercase=True , **_lowercase , ) -> Optional[Any]:
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase , **_lowercase , )
lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _lowercase ) != add_prefix_space:
lowercase_ : str = getattr(_lowercase , pre_tok_state.pop('type' ) )
lowercase_ : str = add_prefix_space
lowercase_ : Any = pre_tok_class(**_lowercase )
lowercase_ : List[Any] = add_prefix_space
lowercase_ : Optional[int] = 'post_processor'
lowercase_ : Optional[int] = getattr(self.backend_tokenizer , _lowercase , _lowercase )
if tokenizer_component_instance:
lowercase_ : Any = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ : Dict = tuple(state['sep'] )
if "cls" in state:
lowercase_ : Dict = tuple(state['cls'] )
lowercase_ : Optional[int] = False
if state.get('add_prefix_space' , _lowercase ) != add_prefix_space:
lowercase_ : int = add_prefix_space
lowercase_ : str = True
if state.get('trim_offsets' , _lowercase ) != trim_offsets:
lowercase_ : Dict = trim_offsets
lowercase_ : Any = True
if changes_to_apply:
lowercase_ : Optional[Any] = getattr(_lowercase , state.pop('type' ) )
lowercase_ : Tuple = component_class(**_lowercase )
setattr(self.backend_tokenizer , _lowercase , _lowercase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase__ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase__ ( self , _lowercase ) -> str:
lowercase_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else value
lowercase_ : Any = value
def lowerCamelCase__ ( self , *_lowercase , **_lowercase ) -> BatchEncoding:
lowercase_ : Tuple = kwargs.get('is_split_into_words' , _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowercase , **_lowercase )
def lowerCamelCase__ ( self , *_lowercase , **_lowercase ) -> BatchEncoding:
lowercase_ : str = kwargs.get('is_split_into_words' , _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowercase , **_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
lowercase_ : List[str] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
lowercase_ : Tuple = [self.sep_token_id]
lowercase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Dict:
return token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self , _lowercase ) -> List[int]:
lowercase_ : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix the text with a space, as is done within Blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(_lowercase )
lowercase_ : Optional[Any] = ' '.join(_lowercase )
lowercase_ : Optional[int] = self.encode(_lowercase )
if len(_lowercase ) > self.model_max_length:
lowercase_ : Dict = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
| 714 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _UpperCAmelCase ( a : Dict , a : Optional[int] , a : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Any = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowercase_ : List[str] = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
lowercase_ : Optional[Any] = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(a , exist_ok=a )
lowercase_ : int = os.path.join(a , 'README.md' )
print(f"Generating {path}" )
with open(a , 'w' , encoding='utf-8' ) as f:
f.write(a )
# make sure we are under the root of the project
A: List[str] = Path(__file__).resolve().parent.parent.parent
A: List[str] = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A , A , A: Any = model_name.split("-")
A: int = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
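# Illustrative output (derived from the template and scores above): for
# model_name = "wmt19-ru-en" the script writes
# model_cards/facebook/wmt19-ru-en/README.md, whose eval table renders as
#
#   pair  | fairseq                                                         | transformers
#   ------|-----------------------------------------------------------------|-------------
#   ru-en | [41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937) | 39.20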
| 7 | 0 |
from collections.abc import Sequence
def _UpperCAmelCase ( a : Sequence[float] , a : bool = False ) -> float:
"""simple docstring"""
if not arr:
return 0
lowercase_ : Dict = 0 if allow_empty_subarrays else float('-inf' )
lowercase_ : str = 0.0
for num in arr:
lowercase_ : int = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowercase_ : Tuple = max(a , a )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
A: int = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 715 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
def _UpperCAmelCase ( a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[str] = git.Repo(search_parent_directories=a )
lowercase_ : Union[str, Any] = {
'repo_id': str(a ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(a , 'git_log.json' ) , 'w' ) as f:
json.dump(a , a , indent=4 )
def _UpperCAmelCase ( a : str ) -> Union[str, Any]:
"""simple docstring"""
if params.n_gpu <= 0:
lowercase_ : int = 0
lowercase_ : Union[str, Any] = -1
lowercase_ : List[str] = True
lowercase_ : Optional[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase_ : Dict = int(os.environ['WORLD_SIZE'] )
lowercase_ : Union[str, Any] = int(os.environ['N_GPU_NODE'] )
lowercase_ : Optional[int] = int(os.environ['RANK'] )
# number of nodes / node ID
lowercase_ : int = params.world_size // params.n_gpu_per_node
lowercase_ : str = params.global_rank // params.n_gpu_per_node
lowercase_ : Dict = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase_ : str = 1
lowercase_ : Dict = 0
lowercase_ : Tuple = 0
lowercase_ : List[Any] = 0
lowercase_ : int = 1
lowercase_ : Tuple = 1
lowercase_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase_ : List[str] = params.node_id == 0 and params.local_rank == 0
lowercase_ : Optional[Any] = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _UpperCAmelCase ( a : Dict ) -> Optional[int]:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
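# Illustrative launch environment (example values only): for a 2-node x 4-GPU
# job, the GPU-initialization helper above expects launcher-style variables
# such as
#
#   WORLD_SIZE=8  N_GPU_NODE=4  N_NODES=2  NODE_RANK=0  RANK=3  LOCAL_RANK=3
#
# which gives n_nodes = 8 // 4 = 2 and node_id = 3 // 4 = 0 for this process.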
| 7 | 0 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : Dict , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> str:
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
lowercase_ : Union[str, Any] = new_id
# turn into Numpy arrays
lowercase_ : List[Any] = np.array(a )
lowercase_ : Optional[Any] = np.array(a )
if reduce_labels:
lowercase_ : Any = 2_5_5
lowercase_ : Dict = label - 1
lowercase_ : List[Any] = 2_5_5
lowercase_ : Any = label != ignore_index
lowercase_ : List[Any] = np.not_equal(a , a )
lowercase_ : Optional[int] = pred_label[mask]
lowercase_ : Union[str, Any] = np.array(a )[mask]
lowercase_ : Optional[int] = pred_label[pred_label == label]
lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Dict = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Optional[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def _UpperCAmelCase ( a : int , a : Optional[Any] , a : Optional[int] , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Dict:
"""simple docstring"""
lowercase_ : Dict = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(a , a ):
        lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = intersect_and_union(
a , a , a , a , a , a )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _UpperCAmelCase ( a : Optional[Any] , a : List[str] , a : Optional[Any] , a : bool , a : Optional[int] = None , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Optional[int]:
"""simple docstring"""
    lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = total_intersect_and_union(
a , a , a , a , a , a )
# compute metrics
lowercase_ : str = {}
lowercase_ : str = total_area_intersect.sum() / total_area_label.sum()
lowercase_ : Optional[Any] = total_area_intersect / total_area_union
lowercase_ : List[Any] = total_area_intersect / total_area_label
lowercase_ : Any = np.nanmean(a )
lowercase_ : Optional[Any] = np.nanmean(a )
lowercase_ : int = all_acc
lowercase_ : Union[str, Any] = iou
lowercase_ : Optional[Any] = acc
if nan_to_num is not None:
lowercase_ : Optional[int] = {metric: np.nan_to_num(a , nan=a ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , ) -> Tuple:
lowercase_ : Optional[int] = mean_iou(
results=_lowercase , gt_seg_maps=_lowercase , num_labels=_lowercase , ignore_index=_lowercase , nan_to_num=_lowercase , label_map=_lowercase , reduce_labels=_lowercase , )
return iou_result
| 716 |
'''simple docstring'''
import os
from distutils.util import strtobool
def _UpperCAmelCase ( a : Any , a : int ) -> Any:
"""simple docstring"""
for e in env_keys:
        lowercase_ : Optional[Any] = int(os.environ.get(e , -1 ) )
if val >= 0:
return val
return default
def _UpperCAmelCase ( a : List[Any] , a : Dict=False ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Optional[int] = os.environ.get(a , str(a ) )
return strtobool(a ) == 1 # As its name indicates `strtobool` actually returns an int...
def _UpperCAmelCase ( a : List[Any] , a : Dict="no" ) -> str:
"""simple docstring"""
lowercase_ : List[Any] = os.environ.get(a , str(a ) )
return value
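# Usage sketch (environment values are assumptions for illustration). The
# three helpers above read, in order, an int, a boolean flag, and a string
# choice from the environment; in the accelerate source they correspond to
# get_int_from_env, parse_flag_from_env and parse_choice_from_env.
#
#   os.environ["ACCELERATE_DEBUG_MODE"] = "1"
#   # int helper:    first non-negative value among the given keys, else default
#   # flag helper:   strtobool semantics, so "1", "true", "yes" all parse as True
#   # choice helper: returns the raw string, e.g. "no", "fp16", "bf16"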
| 7 | 0 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
A: Union[str, Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : str , a : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Dict = RobertaPreLayerNormConfig.from_pretrained(
a , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
lowercase_ : Dict = torch.load(hf_hub_download(repo_id=a , filename='pytorch_model.bin' ) )
lowercase_ : Union[str, Any] = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
lowercase_ : Optional[Any] = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
lowercase_ : int = tensor_value
lowercase_ : str = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=a , config=a , state_dict=a )
model.save_pretrained(a )
# convert tokenizer
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(a )
tokenizer.save_pretrained(a )
if __name__ == "__main__":
A: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A: str = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
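# Hedged usage sketch (the script filename is an assumption; the two flags
# match the argparse definitions above):
#
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm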
| 717 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : Dict , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> str:
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
lowercase_ : Union[str, Any] = new_id
# turn into Numpy arrays
lowercase_ : List[Any] = np.array(a )
lowercase_ : Optional[Any] = np.array(a )
if reduce_labels:
lowercase_ : Any = 2_5_5
lowercase_ : Dict = label - 1
lowercase_ : List[Any] = 2_5_5
lowercase_ : Any = label != ignore_index
lowercase_ : List[Any] = np.not_equal(a , a )
lowercase_ : Optional[int] = pred_label[mask]
lowercase_ : Union[str, Any] = np.array(a )[mask]
lowercase_ : Optional[int] = pred_label[pred_label == label]
lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Dict = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Optional[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def _UpperCAmelCase ( a : int , a : Optional[Any] , a : Optional[int] , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Dict:
"""simple docstring"""
lowercase_ : Dict = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(a , a ):
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = intersect_and_union(
a , a , a , a , a , a )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _UpperCAmelCase ( a : Optional[Any] , a : List[str] , a : Optional[Any] , a : bool , a : Optional[int] = None , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Optional[int]:
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = total_intersect_and_union(
a , a , a , a , a , a )
# compute metrics
lowercase_ : str = {}
lowercase_ : str = total_area_intersect.sum() / total_area_label.sum()
lowercase_ : Optional[Any] = total_area_intersect / total_area_union
lowercase_ : List[Any] = total_area_intersect / total_area_label
lowercase_ : Any = np.nanmean(a )
lowercase_ : Optional[Any] = np.nanmean(a )
lowercase_ : int = all_acc
lowercase_ : Union[str, Any] = iou
lowercase_ : Optional[Any] = acc
if nan_to_num is not None:
lowercase_ : Optional[int] = {metric: np.nan_to_num(a , nan=a ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , ) -> Tuple:
lowercase_ : Optional[int] = mean_iou(
results=_lowercase , gt_seg_maps=_lowercase , num_labels=_lowercase , ignore_index=_lowercase , nan_to_num=_lowercase , label_map=_lowercase , reduce_labels=_lowercase , )
return iou_result
| 7 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'bert-generation'
def __init__( self , _lowercase=5_0358 , _lowercase=1024 , _lowercase=24 , _lowercase=16 , _lowercase=4096 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=0 , _lowercase=2 , _lowercase=1 , _lowercase="absolute" , _lowercase=True , **_lowercase , ) -> Optional[Any]:
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
lowercase_ : Any = vocab_size
lowercase_ : List[Any] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Dict = hidden_act
lowercase_ : Union[str, Any] = intermediate_size
lowercase_ : str = hidden_dropout_prob
lowercase_ : Any = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : str = initializer_range
lowercase_ : Tuple = layer_norm_eps
lowercase_ : Dict = position_embedding_type
lowercase_ : int = use_cache
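# Minimal usage sketch (class names per the upstream transformers API; the
# file above obfuscates them):
#
#   from transformers import BertGenerationConfig, BertGenerationEncoder
#   config = BertGenerationConfig(num_hidden_layers=12)   # shrink the default 24
#   encoder = BertGenerationEncoder(config)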
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit'
def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=16 , **_lowercase , ) -> List[str]:
super().__init__(**_lowercase )
lowercase_ : Optional[int] = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Any = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Any = image_size
lowercase_ : Tuple = patch_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : str = qkv_bias
lowercase_ : List[str] = encoder_stride
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' )
@property
def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__ ( self ) -> float:
return 1E-4
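# Illustrative note: the ONNX config above declares pixel_values with four
# dynamic axes (batch, num_channels, height, width), so an exported graph
# accepts e.g. a (2, 3, 224, 224) float tensor, and output validation uses the
# 1e-4 absolute tolerance returned by the second property (atol_for_validation
# in the upstream API).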
| 7 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A: Optional[int] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: int = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
A: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
"""GPTBigCode model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None,
        activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
        layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True,
        bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
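
# Example (illustrative sketch): the attribute_map above aliases the generic
# config names onto the GPT-style ones, so both spellings resolve identically.
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_embd=512)
    print(config.hidden_size, config.n_embd)  # 512 512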
| 7 | 0 |
"""UperNet model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384,
        auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False,
        loss_ignore_index=255, **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize this instance to a dict, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
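
# Example (illustrative sketch): passing the backbone as a plain dict, which the
# constructor above converts into a full config object via CONFIG_MAPPING.
if __name__ == "__main__":
    config = UperNetConfig(backbone_config={"model_type": "resnet"})
    print(config.to_dict()["backbone_config"]["model_type"])  # resnet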
| 720 |
"""Tests for is_safetensors_compatible: every .bin weight needs a .safetensors twin."""
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
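
# Example (illustrative): the helper can also be called directly; it returns
# True when every .bin weight file has a .safetensors counterpart.
if __name__ == "__main__":
    files = ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
    print(is_safetensors_compatible(files))  # True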
| 7 | 0 |
"""Multilevel feedback queue (MLFQ) scheduler: round-robin queues followed by FCFS."""
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of the queues that the round robin algorithm is applied to
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
| 721 |
"""Release helper: point the documentation version selector in custom.js at a new version."""
import argparse

CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version and the version mapping in custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 7 | 0 |
"""Tests for the PNDM scheduler (PRK warm-up steps followed by PLMS steps)."""
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
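
# Example (illustrative sketch, not part of the test suite): a bare PNDM
# denoising loop with a stand-in "model" that returns zeros instead of a UNet.
if __name__ == "__main__":
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # a real pipeline would call its model here
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])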
| 700 |
"""Percentual proximity scoring: combine several weighted columns into one score per row."""


def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of source_data into one list per column.

    >>> get_data([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]])
    [[20.0, 23.0, 22.0], [60.0, 90.0, 50.0], [2012.0, 2015.0, 2011.0]]
    """
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalize each column to [0, 1]; weight 0 inverts the scale (lower is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one final score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row of source_data and append the combined score to the row.

    `weights` holds one 0/1 flag per column: 0 means lower is better,
    1 means higher is better.
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
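
# Example (illustrative): weight 0 marks a "lower is better" column, weight 1 a
# "higher is better" column; the combined score is appended to each row in place.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))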
| 7 | 0 |
"""Schur complement of a symmetric block matrix [[A, B], [B^T, C]]."""
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Return the Schur complement C - B^T A^{-1} B of the block matrix [[A, B], [B^T, C]].

    A precomputed (pseudo-)inverse of A can be supplied via `pseudo_inv`.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
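
# Worked example (illustrative): for M = [[A, B], [B^T, C]] the determinant
# factors as det(M) = det(A) * det(M/A), where M/A is the value returned above.
#
#     >>> schur_complement(np.array([[2.0, 0.0], [0.0, 2.0]]),
#     ...                  np.array([[1.0], [0.0]]),
#     ...                  np.array([[3.0]]))
#     array([[2.5]])
#
# since C - B^T A^{-1} B = 3 - 1 * (1/2) * 1 = 2.5.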
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        # B passed where A is expected, so the shape checks fail
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
| 701 |
"""Addition of two integers using only bitwise operators."""


def add(first: int, second: int) -> int:
    """
    >>> add(3, 5)
    8
    >>> add(0, -7)
    -7
    """
    while second != 0:
        # carry holds the bits that overflow out of the XOR below
        c = first & second
        # XOR adds each bit position without carrying
        first ^= second
        # shift the carry left so it is added in the next iteration
        second = c << 1
    return first
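
# Illustrative trace of add(5, 3) through the loop above:
#   first=0b101 (5), second=0b011 (3)
#   carry=0b001 -> first=0b110 (6), second=0b010
#   carry=0b010 -> first=0b100 (4), second=0b100
#   carry=0b100 -> first=0b000 (0), second=0b1000
#   carry=0b000 -> first=0b1000 (8), second=0 -> loop exits, returns 8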
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 0 |
"""Addition of two integers using only bitwise operators."""


def add(first: int, second: int) -> int:
    """
    >>> add(3, 5)
    8
    >>> add(0, -7)
    -7
    """
    while second != 0:
        # carry holds the bits that overflow out of the XOR below
        c = first & second
        # XOR adds each bit position without carrying
        first ^= second
        # shift the carry left so it is added in the next iteration
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 702 |
"""Circular FIFO queue with a fixed capacity, backed by a Python list."""


class CircularQueue:
    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
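
# Example (illustrative): a capacity-3 queue wrapping around its backing array.
if __name__ == "__main__":
    q = CircularQueue(3)
    q.enqueue("a").enqueue("b").enqueue("c")
    print(q.dequeue())        # a
    q.enqueue("d")            # reuses the slot freed at index 0
    print(len(q), q.first())  # 3 b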
| 7 | 0 |