"""simple docstring"""
def lowerCAmelCase_ ( snake_case_ ):
if not isinstance(snake_case_,snake_case_ ):
_A : Any = f'''Input value of [number={number}] must be an integer'''
raise TypeError(snake_case_ )
if number < 0:
return False
_A : List[str] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
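
# Added usage sketch (not part of the original module): automorphic numbers are
# those whose square ends in the number itself.
if __name__ == "__main__":
    for candidate in (5, 6, 25, 76, 7):
        print(candidate, is_automorphic_number(candidate))  # True, True, True, True, False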
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to"
            " this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                writer.write(f"{key}: {val:.6f}\n")

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
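
# Added wiring sketch (assumed usage, not in the original file): the helpers above
# plug into a pytorch_lightning Trainer as ordinary callbacks.
if __name__ == "__main__":
    demo_trainer = pl.Trainer(
        max_epochs=1,
        callbacks=[
            Seq2SeqLoggingCallback(),
            get_checkpoint_callback("outputs", "rouge2"),
            get_early_stopping_callback("rouge2", patience=3),
        ],
    )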
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
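
# This suite runs under pytest; a typical invocation from a transformers checkout
# (path assumed) would be:
#
#   python -m pytest tests/models/phobert/test_tokenization_phobert.py -q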
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * solution = vector with Gaussian elimination (partial
    pivoting) followed by back substitution."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    # Build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree passing through the points
    (1, y_list[0]), (2, y_list[1]), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
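
# Added sanity check: the Project Euler 101 statement works u(n) = n**3 as an
# example; the first incorrect terms of its optimum polynomials sum to 1 + 15 + 58 = 74.
if __name__ == "__main__":
    assert solution(lambda n: n**3, order=3) == 74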
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor for this class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
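
# Added usage sketch (the checkpoint name is an assumption; any repo that ships a
# preprocessor_config.json resolves the same way):
#
#   from transformers import AutoFeatureExtractor
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")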
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7    |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ------ | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'    | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de'  | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
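
# Added usage sketch (checkpoint name taken from the pretrained map above):
#
#   tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tokenizer("Hello world")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))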
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Return the inverse of a 2x2 or 3x3 matrix, raising ValueError when the
    determinant is 0 or the shape is unsupported."""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
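
# Added sanity-check sketch: the inverse of the diagonal matrix [[2, 0], [0, 2]]
# is [[0.5, 0.0], [0.0, 0.5]].
if __name__ == "__main__":
    print(inverse_of_matrix([[2.0, 0.0], [0.0, 2.0]]))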
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
        The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
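
# Added smoke-test sketch (shapes are assumptions derived from the defaults above):
# a freshly initialized prior maps a noisy (1, 768) image embedding, a pooled text
# embedding and (1, 77, 768) per-token text states to a (1, 768) prediction.
#
#   model = PriorTransformer()
#   out = model(torch.randn(1, 768), 1, torch.randn(1, 768),
#               encoder_hidden_states=torch.randn(1, 77, 768))
#   out.predicted_image_embedding.shape  # torch.Size([1, 768])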
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])

    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )

    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
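
# These tests are driven through pytest; a typical invocation from a transformers
# checkout (path assumed) would be:
#
#   RUN_SLOW=1 python -m pytest tests/extended/test_trainer_ext.py -k seq2seq -q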
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the API URL only yields a redirect, so
    fetch the redirect location first."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
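
# Added invocation sketch (script name and run id are placeholders):
#
#   python get_ci_error_statistics.py --workflow_run_id 1234567890 \
#       --output_dir ci_errors --token <GITHUB_TOKEN>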
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
_snake_case = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
_snake_case = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCAmelCase_ ( ):
_A : Dict = (
list(range(ord("""!""" ),ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ),ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ),ord("""ÿ""" ) + 1 ) )
)
_A : Optional[int] = bs[:]
_A : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
_A : int = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_,snake_case_ ) )
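# Minimal sketch of the table built above (the function is called without arguments,
# as in the `lru_cache`d original):
#
#     table = bytes_to_unicode()
#     len(table)       # 256 -- every byte value maps to exactly one unicode character
#     table[ord("A")]  # "A" -- printable bytes map to themselves
#     table[0]         # a remapped printable code point, since byte 0 is a control byte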
def lowerCAmelCase_ ( snake_case_ ):
_A : Any = set()
_A : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A : int = char
return pairs
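# Sketch of `get_pairs` on a symbol tuple (it is called with tuples of
# single-character strings before any merges have happened):
#
#     get_pairs(("l", "o", "w"))  # {("l", "o"), ("o", "w")}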
class lowercase ( UpperCamelCase__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , _a , _a , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , **_a , ) -> Any:
_A : Optional[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
_A : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
_A : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
_A : int = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
_A : Union[str, Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
_A : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_A : int = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , )
with open(_a , encoding="""utf-8""" ) as vocab_handle:
_A : Optional[int] = json.load(_a )
_A : Optional[int] = {v: k for k, v in self.encoder.items()}
_A : Tuple = errors # how to handle errors in decoding
_A : Optional[Any] = bytes_to_unicode()
_A : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(_a , encoding="""utf-8""" ) as merges_handle:
_A : List[str] = merges_handle.read().split("""\n""" )[1:-1]
_A : Dict = [tuple(merge.split() ) for merge in bpe_merges]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = {}
_A : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_A : List[str] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def a__ ( self ) -> Optional[int]:
return len(self.encoder )
def a__ ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self , _a ) -> str:
if token in self.cache:
return self.cache[token]
_A : Optional[Any] = tuple(_a )
_A : Dict = get_pairs(_a )
if not pairs:
return token
while True:
_A : Tuple = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A : List[Any] = bigram
_A : Tuple = []
_A : Tuple = 0
while i < len(_a ):
try:
_A : Optional[int] = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A : List[Any] = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A : Any = tuple(_a )
_A : Optional[Any] = new_word
if len(_a ) == 1:
break
else:
_A : Optional[int] = get_pairs(_a )
_A : Dict = """ """.join(_a )
        self.cache[token] = word
return word
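    # Worked example (illustrative; assumes ("l", "o") is the lowest-ranked merge for
    # this input): bpe("low") starts from ("l", "o", "w"), repeatedly merges the
    # lowest-ranked pair present in self.bpe_ranks -- ("l", "o") -> ("lo", "w"), then
    # possibly ("lo", "w") -> ("low",) -- and returns the symbols joined with spaces,
    # caching the result per input token.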
def a__ ( self , _a ) -> Union[str, Any]:
_A : str = []
for token in re.findall(self.pat , _a ):
_A : List[str] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def a__ ( self , _a ) -> List[str]:
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def a__ ( self , _a ) -> Optional[Any]:
return self.decoder.get(_a )
def a__ ( self , _a ) -> Optional[int]:
_A : Tuple = """""".join(_a )
_A : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : str = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Any = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + """\n""" )
_A : Dict = 0
with open(_a , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A : Any = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def a__ ( self , _a , _a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A : Optional[Any] = [self.cls_token_id]
_A : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Dict = [self.sep_token_id]
_A : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self , _a , _a=False , **_a ) -> Tuple:
_A : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
_A : str = """ """ + text
return (text, kwargs)
def a__ ( self , _a , _a = None , _a = PaddingStrategy.DO_NOT_PAD , _a = None , _a = None , ) -> dict:
_A : int = super()._pad(
encoded_inputs=_a , max_length=_a , padding_strategy=_a , pad_to_multiple_of=_a , return_attention_mask=_a , )
# Load from model defaults
if return_attention_mask is None:
_A : Any = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_A : int = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
_A : Dict = len(encoded_inputs["""global_attention_mask"""] ) != len(_a )
if needs_to_be_padded:
_A : Optional[int] = len(_a ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_A : Dict = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
_A : Union[str, Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
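    # Illustrative padding sketch (hypothetical values): with padding_side == "right",
    # a 3-token `global_attention_mask` like [1, 0, 0] padded to length 5 becomes
    # [1, 0, 0, -1, -1] -- `-1` marks padding, since `0` already means "local attention".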
| 350 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = "resnet"
_a = ["basic", "bottleneck"]
def __init__( self , _a=3 , _a=64 , _a=[256, 512, 1024, 2048] , _a=[3, 4, 6, 3] , _a="bottleneck" , _a="relu" , _a=False , _a=None , _a=None , **_a , ) -> int:
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
_A : Optional[Any] = num_channels
_A : List[Any] = embedding_size
_A : int = hidden_sizes
_A : Union[str, Any] = depths
_A : Optional[int] = layer_type
_A : Any = hidden_act
_A : List[Any] = downsample_in_first_stage
_A : int = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
_A , _A : str = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class lowercase ( UpperCamelCase__ ):
_a = version.parse("1.11" )
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a__ ( self ) -> float:
return 1e-3
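# Illustrative note (a sketch, not part of the original file): the ONNX export config
# above declares a single dynamic NCHW `pixel_values` input (batch, num_channels,
# height and width all symbolic) and a 1e-3 absolute tolerance that an export
# validation step could use when comparing original and exported model outputs.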
| 343 | 0 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = None ):
if start is None:
_A : List[str] = 0
if end is None:
_A : List[str] = len(snake_case_ ) - 1
if start >= end:
return
_A : Optional[Any] = (start + end) // 2
slowsort(snake_case_,snake_case_,snake_case_ )
slowsort(snake_case_,mid + 1,snake_case_ )
if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
slowsort(snake_case_,snake_case_,end - 1 )
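# Usage sketch: slowsort mutates the list in place, e.g.
#
#     data = [3, 1, 2]
#     slowsort(data)
#     data  # [1, 2, 3]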
if __name__ == "__main__":
from doctest import testmod
testmod()
| 351 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# Load checkpoint
_A : Optional[int] = torch.load(snake_case_,map_location="""cpu""" )
_A : Any = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
_A : Any = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_A : Tuple = v
else:
_A : Dict = v
_A : Optional[Any] = chkpt["""params"""]
    _A : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(v,(torch.FloatTensor, numpy.ndarray) )}
_A : str = chkpt["""dico_word2id"""]
_A : Optional[Any] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""","""""" ): i for s, i in vocab.items()}
# Save pytorch-model
_A : Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_A : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_A : Optional[int] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(snake_case_,snake_case_ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
    print(f'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
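# Example invocation (hypothetical script name and paths):
#     python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#         --xlm_checkpoint_path /path/to/xlm_checkpoint.pth \
#         --pytorch_dump_folder_path /path/to/output_dir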
| 343 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__ ):
_a = ["pixel_values"]
def __init__( self , _a = True , _a = 1 / 255 , _a = True , _a = 8 , **_a , ):
super().__init__(**_a )
_A : Tuple = do_rescale
_A : Optional[int] = rescale_factor
_A : Tuple = do_pad
_A : Tuple = pad_size
def a__ ( self , _a , _a , _a = None , **_a ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def a__ ( self , _a , _a , _a = None ):
_A : Dict = get_image_size(_a )
_A : List[str] = (old_height // size + 1) * size - old_height
_A : Dict = (old_width // size + 1) * size - old_width
return pad(_a , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=_a )
def a__ ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ):
_A : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : Optional[int] = do_pad if do_pad is not None else self.do_pad
_A : Any = pad_size if pad_size is not None else self.pad_size
_A : List[str] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
_A : str = [to_numpy_array(_a ) for image in images]
if do_rescale:
_A : Any = [self.rescale(image=_a , scale=_a ) for image in images]
if do_pad:
_A : Any = [self.pad(_a , size=_a ) for image in images]
_A : Union[str, Any] = [to_channel_dimension_format(_a , _a ) for image in images]
_A : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
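# Worked example for the symmetric padding above (illustrative, with pad_size=8): a
# 13x21 image gets pad_height = (13 // 8 + 1) * 8 - 13 = 3 and
# pad_width = (21 // 8 + 1) * 8 - 21 = 3, i.e. it is padded on the bottom/right to
# 16x24, the next multiples of 8. An exact multiple (e.g. height 16) still gains a
# full extra block of 8, which is what the formula above computes.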
| 352 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( UpperCamelCase__ ):
_a = ["image_processor", "tokenizer"]
_a = "BlipImageProcessor"
_a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a , _a ) -> Any:
_A : List[Any] = False
super().__init__(_a , _a )
_A : Optional[int] = self.image_processor
def __call__( self , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_A : Dict = self.tokenizer
_A : Dict = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
return text_encoding
# add pixel_values
_A : int = self.image_processor(_a , return_tensors=_a )
if text is not None:
_A : List[Any] = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
else:
_A : int = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def a__ ( self , *_a , **_a ) -> Any:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.decode(*_a , **_a )
@property
def a__ ( self ) -> Optional[Any]:
_A : Any = self.tokenizer.model_input_names
_A : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
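# Usage sketch (illustrative; the processor name is an assumption about the
# de-obfuscated original): given `processor = BlipProcessor(image_processor, tokenizer)`,
# `processor(images=image, text="a photo of", return_tensors="pt")` returns a
# BatchEncoding holding both `pixel_values` and the tokenized text, while
# `processor(text=...)` alone falls through to the tokenizer, per `__call__` above.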
| 343 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
_snake_case = logging.getLogger(__name__)
@dataclass
class lowercase :
_a = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_a = field(
default=UpperCamelCase__,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_a = field(
default=UpperCamelCase__,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_a = field(
default=UpperCamelCase__,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},)
_a = field(
default="main",metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},)
_a = field(
default=UpperCamelCase__,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},)
@dataclass
class lowercase :
_a = field(default=UpperCamelCase__,metadata={"help": "The input training data file (a text file)."} )
_a = field(
default=UpperCamelCase__,metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Overwrite the cached training and evaluation sets"} )
_a = field(
default=UpperCamelCase__,metadata={"help": "The number of processes to use for the preprocessing."},)
_a = field(
default=UpperCamelCase__,metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},)
_a = field(
default=UpperCamelCase__,metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},)
_a = field(
default=UpperCamelCase__,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},)
_a = field(
default=UpperCamelCase__,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},)
def a__ ( self ) -> Dict:
if self.train_file is not None:
_A : List[str] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_A : List[str] = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowercase :
_a = 4_2
_a = True
_a = None
_a = None
def __call__( self , _a ) -> Optional[Any]:
_A : Tuple = """label""" if """label""" in features[0].keys() else """labels"""
_A : Any = [feature.pop(_a ) for feature in features]
_A : List[str] = len(_a )
_A : Optional[int] = len(features[0]["""input_ids"""] )
_A : Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(_a )] for feature in features
]
_A : str = list(chain(*_a ) )
_A : Optional[Any] = self.tokenizer.pad(
_a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
_A : str = {k: v.view(_a , _a , -1 ) for k, v in batch.items()}
# Add back labels
        _A : Union[str, Any] = torch.tensor(_a , dtype=torch.int64 )
return batch
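    # Shape sketch (illustrative): for a batch of B examples with 4 choices each, the
    # collator flattens to B * 4 sequences, pads them together, then reshapes every
    # tensor back to (B, 4, seq_len) and re-attaches the labels as a (B,) int64 tensor.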
def lowerCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""",snake_case_,snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",datefmt="""%m/%d/%Y %H:%M:%S""",handlers=[logging.StreamHandler(sys.stdout )],)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A : Tuple = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
datasets.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_A : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_A : Tuple = {}
if data_args.train_file is not None:
_A : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
_A : Any = data_args.validation_file
_A : Dict = data_args.train_file.split(""".""" )[-1]
_A : Tuple = load_dataset(
snake_case_,data_files=snake_case_,cache_dir=model_args.cache_dir,use_auth_token=True if model_args.use_auth_token else None,)
else:
# Downloading and loading the swag dataset from the hub.
_A : Optional[Any] = load_dataset(
"""swag""","""regular""",cache_dir=model_args.cache_dir,use_auth_token=True if model_args.use_auth_token else None,)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,cache_dir=model_args.cache_dir,revision=model_args.model_revision,use_auth_token=True if model_args.use_auth_token else None,)
_A : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,cache_dir=model_args.cache_dir,use_fast=model_args.use_fast_tokenizer,revision=model_args.model_revision,use_auth_token=True if model_args.use_auth_token else None,)
_A : List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ),config=snake_case_,cache_dir=model_args.cache_dir,revision=model_args.model_revision,use_auth_token=True if model_args.use_auth_token else None,)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_A : Dict = [f'''ending{i}''' for i in range(4 )]
_A : Tuple = """sent1"""
_A : Union[str, Any] = """sent2"""
if data_args.max_seq_length is None:
_A : Optional[Any] = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
_A : Tuple = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_A : List[Any] = min(data_args.max_seq_length,tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(snake_case_ ):
_A : Optional[int] = [[context] * 4 for context in examples[context_name]]
_A : Optional[Any] = examples[question_header_name]
_A : Union[str, Any] = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(snake_case_ )
]
# Flatten out
_A : Union[str, Any] = list(chain(*snake_case_ ) )
_A : Optional[Any] = list(chain(*snake_case_ ) )
# Tokenize
_A : Union[str, Any] = tokenizer(
snake_case_,snake_case_,truncation=snake_case_,max_length=snake_case_,padding="""max_length""" if data_args.pad_to_max_length else False,)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0,len(snake_case_ ),4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
_A : List[Any] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
_A : Tuple = min(len(snake_case_ ),data_args.max_train_samples )
_A : Any = train_dataset.select(range(snake_case_ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
_A : Optional[Any] = train_dataset.map(
snake_case_,batched=snake_case_,num_proc=data_args.preprocessing_num_workers,load_from_cache_file=not data_args.overwrite_cache,)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
_A : List[str] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
_A : int = min(len(snake_case_ ),data_args.max_eval_samples )
_A : List[str] = eval_dataset.select(range(snake_case_ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
_A : Optional[int] = eval_dataset.map(
snake_case_,batched=snake_case_,num_proc=data_args.preprocessing_num_workers,load_from_cache_file=not data_args.overwrite_cache,)
# Data collator
_A : int = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=snake_case_,pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(snake_case_ ):
_A : Dict = eval_predictions
_A : str = np.argmax(snake_case_,axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_A : List[str] = Trainer(
model=snake_case_,args=snake_case_,train_dataset=train_dataset if training_args.do_train else None,eval_dataset=eval_dataset if training_args.do_eval else None,tokenizer=snake_case_,data_collator=snake_case_,compute_metrics=snake_case_,)
# Training
if training_args.do_train:
_A : Tuple = None
if training_args.resume_from_checkpoint is not None:
_A : int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A : Dict = last_checkpoint
_A : Optional[Any] = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model() # Saves the tokenizer too for easy upload
_A : Optional[int] = train_result.metrics
_A : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case_ )
)
_A : Tuple = min(snake_case_,len(snake_case_ ) )
trainer.log_metrics("""train""",snake_case_ )
trainer.save_metrics("""train""",snake_case_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_A : Any = trainer.evaluate()
_A : List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case_ )
_A : Dict = min(snake_case_,len(snake_case_ ) )
trainer.log_metrics("""eval""",snake_case_ )
trainer.save_metrics("""eval""",snake_case_ )
_A : Union[str, Any] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
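# Example invocation (hypothetical, mirroring the argument classes above):
#     python run_swag.py --model_name_or_path bert-base-uncased \
#         --do_train --do_eval --output_dir /tmp/swag_out --per_device_train_batch_size 16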
| 353 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = 0
if start < end:
_A : Tuple = randint(snake_case_,snake_case_ )
        # move the randomly chosen pivot value to the end of the slice
        a[end], a[pivot] = a[pivot], a[end]
_A , _A : List[Any] = _in_place_partition(snake_case_,snake_case_,snake_case_ )
count += _in_place_quick_sort(snake_case_,snake_case_,p - 1 )
count += _in_place_quick_sort(snake_case_,p + 1,snake_case_ )
return count
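# Usage sketch: sorts a[start:end + 1] in place and returns the number of element
# comparisons performed, e.g.
#
#     data = [3, 1, 2]
#     comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#     data  # [1, 2, 3]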
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = 0
_A : List[str] = randint(snake_case_,snake_case_ )
    # move the randomly chosen pivot value to the end of the slice
    a[end], a[pivot] = a[pivot], a[end]
_A : List[str] = start - 1
for index in range(snake_case_,snake_case_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    # place the pivot just after the last element smaller than it
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
return new_pivot_index + 1, count
_snake_case = TemporaryFile()
_snake_case = 100 # 100 elements are to be sorted
_snake_case , _snake_case = 0, 1 # mean and standard deviation
_snake_case = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
_snake_case = np.load(outfile)
_snake_case = len(M) - 1
_snake_case = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 343 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def lowerCAmelCase_ ( ):
_A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
_A : Optional[Any] = parser.parse_args()
return args.f
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> None:
_A : List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(_a )
def a__ ( self , _a ) -> Dict:
_A : Tuple = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_a , """argv""" , _a ):
_A : Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_a , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ) -> Optional[int]:
_A : Tuple = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_a )
_A : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
_A : List[str] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
| 354 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class lowercase ( UpperCamelCase__ ):
_a = "audio-spectrogram-transformer"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=16 , _a=True , _a=10 , _a=10 , _a=1024 , _a=128 , **_a , ) -> List[Any]:
super().__init__(**_a )
_A : Any = hidden_size
_A : Tuple = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Any = intermediate_size
_A : Optional[Any] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : Optional[Any] = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : str = patch_size
_A : Tuple = qkv_bias
_A : Dict = frequency_stride
_A : Union[str, Any] = time_stride
_A : Any = max_length
_A : Tuple = num_mel_bins
| 343 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 355 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def lowerCAmelCase_ ( ):
_A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
_A : Optional[Any] = parser.parse_args()
return args.f
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> None:
_A : List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(_a )
def a__ ( self , _a ) -> Dict:
_A : Tuple = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_a , """argv""" , _a ):
_A : Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_a , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ) -> Optional[int]:
_A : Tuple = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_a )
_A : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
_A : List[str] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
| 343 | 0 |
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = len(snake_case_ )
_A : str = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # a sum of zero (0) can always be formed by taking no element, hence True
for i in range(arr_len + 1 ):
        subset[i][0] = True
    # if the sum is not zero and the set is empty, the answer is False
for i in range(1,required_sum + 1 ):
        subset[0][i] = False
for i in range(1,arr_len + 1 ):
for j in range(1,required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
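# Worked example: arr = [3, 34, 4, 12, 5, 2] with required_sum = 9 returns True
# (4 + 5 = 9), while required_sum = 30 returns False; subset[i][j] records whether
# some subset of the first i elements sums to j.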
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ) -> Union[str, Any]:
_A : Optional[int] = parent
_A : Dict = batch_size
_A : Any = image_size
_A : Optional[int] = patch_size
_A : Optional[int] = num_channels
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Any = hidden_size
_A : Any = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : int = intermediate_size
_A : Dict = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Any = type_sequence_label_size
_A : str = initializer_range
_A : Tuple = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : List[Any] = (image_size // patch_size) ** 2
_A : str = num_patches + 1
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : List[str] = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Union[str, Any] = self.type_sequence_label_size
_A : Tuple = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , labels=_a )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : Dict = 1
_A : str = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> Any:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A : Dict = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Tuple:
_A : Tuple = ViTMSNModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Any:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Tuple = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def a__ ( self ) -> str:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> List[Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Any:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> int:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[int]:
torch.manual_seed(2 )
_A : Tuple = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a )
_A : Tuple = self.default_image_processor
_A : Dict = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : int = model(**_a )
# verify the logits
_A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Optional[int] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 343 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowercase ( UpperCamelCase__ ):
_a = ["vqvae"]
def __init__( self , _a , _a , _a , _a , ) -> Optional[int]:
super().__init__()
self.register_modules(unet=_a , scheduler=_a , mel=_a , vqvae=_a )
def a__ ( self ) -> int:
return 50 if isinstance(self.scheduler , _a ) else 1000
@torch.no_grad()
def __call__( self , _a = 1 , _a = None , _a = None , _a = 0 , _a = 0 , _a = None , _a = None , _a = 0 , _a = 0 , _a = None , _a = 0 , _a = None , _a = None , _a=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_A : List[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
_A : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_A : List[str] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_A : str = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_a , device=self.device , )
_A : Optional[int] = noise
_A : Union[str, Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a , _a )
_A : Dict = self.mel.audio_slice_to_image(_a )
_A : List[Any] = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
_A : Union[str, Any] = (input_image / 255) * 2 - 1
_A : int = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_A : int = self.vqvae.encode(torch.unsqueeze(_a , 0 ) ).latent_dist.sample(
generator=_a )[0]
_A : Dict = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_A : str = self.scheduler.add_noise(_a , _a , self.scheduler.timesteps[start_step - 1] )
_A : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_A : Optional[Any] = int(mask_start_secs * pixels_per_second )
_A : Optional[int] = int(mask_end_secs * pixels_per_second )
_A : List[str] = self.scheduler.add_noise(_a , _a , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _a ):
_A : Optional[int] = self.unet(_a , _a , _a )["""sample"""]
else:
_A : Any = self.unet(_a , _a )["""sample"""]
if isinstance(self.scheduler , _a ):
_A : int = self.scheduler.step(
model_output=_a , timestep=_a , sample=_a , eta=_a , generator=_a , )["""prev_sample"""]
else:
_A : Union[str, Any] = self.scheduler.step(
model_output=_a , timestep=_a , sample=_a , generator=_a , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_A : str = 1 / self.vqvae.config.scaling_factor * images
_A : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
_A : int = (images / 2 + 0.5).clamp(0 , 1 )
_A : Tuple = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_A : Dict = (images * 255).round().astype("""uint8""" )
        _A : Tuple = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode="""RGB""" ).convert("""L""" ) for _ in images) )
_A : Optional[Any] = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) , **ImagePipelineOutput(_a ) )
@torch.no_grad()
def a__ ( self , _a , _a = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , _a )
self.scheduler.set_timesteps(_a )
_A : str = np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
_A : str = (sample / 255) * 2 - 1
_A : Dict = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_A : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_A : str = self.scheduler.alphas_cumprod[t]
_A : int = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_A : List[str] = 1 - alpha_prod_t
_A : Any = self.unet(_a , _a )["""sample"""]
_A : Tuple = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_A : List[Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_A : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def a__ ( _a , _a , _a ) -> torch.Tensor:
_A : Union[str, Any] = acos(torch.dot(torch.flatten(_a ) , torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
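    # Illustrative note (method and class names are assumptions about the de-obfuscated
    # original): the static helper above is a spherical interpolation (slerp), so a call
    # like `AudioDiffusionPipeline.slerp(x0, x1, 0.5)` blends two noise tensors along the
    # great circle between them rather than linearly.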
| 357 |
def lowerCAmelCase_ ( snake_case_ = 1000 ):
_A : List[Any] = 3
_A : Tuple = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:  # multiples of 15 satisfy the test once, so no correction is needed
            result += a
        a += 1
a += 1
return result
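# Sanity check (illustrative): solution(10) == 23, i.e. 3 + 5 + 6 + 9.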
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ) -> List[Any]:
_A : Tuple = parent
_A : Any = batch_size
_A : int = image_size
_A : Tuple = num_channels
_A : List[Any] = num_stages
_A : Any = hidden_sizes
_A : Union[str, Any] = depths
_A : Union[str, Any] = is_training
_A : Tuple = use_labels
_A : Optional[Any] = intermediate_size
_A : Union[str, Any] = hidden_act
_A : Any = num_labels
_A : List[str] = initializer_range
_A : str = out_features
_A : int = out_indices
_A : List[Any] = scope
def a__ ( self ) -> str:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : str = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.num_labels )
_A : str = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> int:
_A : int = ConvNextModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> List[Any]:
_A : Union[str, Any] = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
_A : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> str:
_A : List[str] = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Optional[Any] = None
_A : str = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> int:
_A : int = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : int = ConvNextModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Union[str, Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> int:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Optional[int]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : List[str] = self.default_image_processor
_A : int = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Dict = model(**_a )
# verify the logits
_A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Any = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> List[str]:
_A : Optional[int] = ConvNextModelTester(self )
| 343 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class lowercase ( UpperCamelCase__ ):
_a = "audio-spectrogram-transformer"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=16 , _a=True , _a=10 , _a=10 , _a=1024 , _a=128 , **_a , ) -> List[Any]:
super().__init__(**_a )
_A : Any = hidden_size
_A : Tuple = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Any = intermediate_size
_A : Optional[Any] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : Optional[Any] = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : str = patch_size
_A : Tuple = qkv_bias
_A : Dict = frequency_stride
_A : Union[str, Any] = time_stride
_A : Any = max_length
_A : Tuple = num_mel_bins
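# Hedged usage sketch (values illustrative): instantiating the config simply stores these
# hyperparameters, e.g. `cfg = lowercase(hidden_size=256, num_mel_bins=128)` sets
# `cfg.hidden_size == 256` and leaves the remaining arguments at their defaults.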
| 359 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 | 0 |
"""simple docstring"""
from __future__ import annotations
_snake_case = list[tuple[int, int]]
_snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class lowercase :
def __init__( self , _a , _a , _a , _a , _a , _a , ) -> Dict:
_A : Union[str, Any] = pos_x
_A : Optional[int] = pos_y
_A : str = (pos_y, pos_x)
_A : int = goal_x
_A : str = goal_y
_A : Optional[Any] = g_cost
_A : Any = parent
_A : List[Any] = self.calculate_heuristic()
def a__ ( self ) -> float:
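        # Manhattan distance heuristic: |dx| + |dy| between the current cell and the goal.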
_A : int = abs(self.pos_x - self.goal_x )
_A : List[str] = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _a ) -> bool:
return self.f_cost < other.f_cost
class lowercase :
def __init__( self , _a , _a ) -> int:
_A : Dict = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a )
_A : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , _a )
_A : List[Any] = [self.start]
_A : list[Node] = []
_A : List[Any] = False
def a__ ( self ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_A : Tuple = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
_A : List[Any] = True
return self.retrace_path(_a )
self.closed_nodes.append(_a )
_A : Any = self.get_successors(_a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_a )
else:
# retrieve the best current path
_A : int = self.open_nodes.pop(self.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_a )
else:
self.open_nodes.append(_a )
if not self.reached:
return [self.start.pos]
return None
def a__ ( self , _a ) -> list[Node]:
_A : Tuple = []
for action in delta:
_A : Tuple = parent.pos_x + action[1]
_A : int = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) )
return successors
def a__ ( self , _a ) -> Path:
_A : Any = node
_A : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_A : List[str] = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
_snake_case = (0, 0)
_snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
_snake_case = GreedyBestFirst(init, goal)
_snake_case = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_snake_case = 2
for elem in grid:
print(elem)
| 360 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowercase :
_a = 42
# setable values
_a = 42
_a = 42
_a = None
@classmethod
def a__ ( cls , _a , _a , _a ) -> Tuple:
return cls(common=_a , init_noise_sigma=_a , timesteps=_a )
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in FlaxKarrasDiffusionSchedulers]
_a = 42
@property
def a__ ( self ) -> Dict:
return True
@register_to_config
def __init__( self , _a = 1000 , _a = 0.0001 , _a = 0.02 , _a = "linear" , _a = None , _a = "fixed_small" , _a = True , _a = "epsilon" , _a = jnp.floataa , ) -> Tuple:
_A : Tuple = dtype
def a__ ( self , _a = None ) -> DDPMSchedulerState:
if common is None:
_A : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_A : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
_A : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_a , init_noise_sigma=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a = None ) -> jnp.ndarray:
return sample
def a__ ( self , _a , _a , _a = () ) -> DDPMSchedulerState:
_A : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_A : Dict = (jnp.arange(0 , _a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a=None , _a=None ) -> Optional[int]:
_A : Optional[Any] = state.common.alphas_cumprod[t]
_A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_A : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_A : Optional[Any] = jnp.clip(_a , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_A : Any = jnp.log(jnp.clip(_a , a_min=1e-20 ) )
elif variance_type == "fixed_large":
_A : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_A : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_A : str = variance
_A : Union[str, Any] = state.common.betas[t]
_A : Tuple = (predicted_variance + 1) / 2
_A : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self , _a , _a , _a , _a , _a = None , _a = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_A : Dict = timestep
if key is None:
_A : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_A , _A : List[str] = jnp.split(_a , sample.shape[1] , axis=1 )
else:
_A : int = None
# 1. compute alphas, betas
_A : int = state.common.alphas_cumprod[t]
_A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_A : Union[str, Any] = 1 - alpha_prod_t
_A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or `v_prediction` '''
                """ for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A : Union[str, Any] = jnp.clip(_a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_A : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_A : Tuple = jax.random.split(_a , num=1 )
_A : Dict = jax.random.normal(_a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_a , _a , predicted_variance=_a ) ** 0.5) * noise
_A : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_A : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_a , state=_a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return add_noise_common(state.common , _a , _a , _a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return get_velocity_common(state.common , _a , _a , _a )
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
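# Hedged usage sketch (names illustrative, not from the source): a typical ancestral
# sampling loop with this scheduler looks like
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps)
#     for t in state.timesteps:
#         model_output = model(sample, t)
#         sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)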
| 343 | 0 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ) -> List[str]:
_A : Any = parent
_A : Dict = batch_size
_A : Optional[int] = seq_length
_A : int = is_training
_A : List[str] = use_attention_mask
_A : List[str] = use_token_type_ids
_A : Optional[Any] = use_labels
_A : Optional[int] = vocab_size
_A : Optional[Any] = hidden_size
_A : Optional[Any] = num_hidden_layers
_A : Optional[int] = num_attention_heads
_A : Dict = intermediate_size
_A : List[str] = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Tuple = max_position_embeddings
_A : Tuple = type_vocab_size
_A : List[Any] = type_sequence_label_size
_A : Union[str, Any] = initializer_range
_A : Tuple = num_choices
def a__ ( self ) -> Optional[int]:
_A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : List[str] = None
if self.use_attention_mask:
_A : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_A : str = None
if self.use_token_type_ids:
_A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> int:
_A : Union[str, Any] = self.prepare_config_and_inputs()
_A : List[str] = config_and_inputs
_A : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def a__ ( self ) -> int:
_A : Tuple = self.prepare_config_and_inputs()
_A : int = config_and_inputs
_A : Dict = True
_A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = True
_a = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self ) -> Union[str, Any]:
_A : Optional[Any] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def a__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
_A : Union[str, Any] = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_a )
_A : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> List[str]:
_A : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_a )
_A : Union[str, Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
_A : int = model(_a )[0]
_A : Tuple = [1, 11, 5_0265]
self.assertEqual(list(output.shape ) , _a )
# compare the actual values for a slice.
_A : str = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
@slow
def a__ ( self ) -> Optional[Any]:
_A : str = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_a )
_A : Tuple = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
_A : Dict = model(_a )[0]
# compare the actual values for a slice.
_A : Dict = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 361 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=0 ):
# Format the message.
if name is None:
_A : Union[str, Any] = None
else:
_A : Dict = """.""" * max(0,spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
_A : Tuple = fmt.format(snake_case_ )
# Print and recurse (if needed).
if isinstance(snake_case_,snake_case_ ):
if msg is not None:
print(snake_case_ )
for k in val.keys():
recursive_print(snake_case_,val[k],spaces + 2 )
elif isinstance(snake_case_,torch.Tensor ):
print(snake_case_,""":""",val.size() )
else:
print(snake_case_,""":""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
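    # Shape example (illustrative): with num_splits=3 (fused q/k/v), num_heads=16 and
    # hidden_size=64 per head, a v1.0 checkpoint stores [16 * 64 * 3, :] while v2.0+
    # stores [16 * 3 * 64, :]; both are rearranged here to [3 * 16 * 64, :].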
_A : str = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_A : Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
_A : Tuple = param.view(*snake_case_ )
_A : Any = param.transpose(0,2 )
_A : int = param.transpose(1,2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_A : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
_A : int = param.view(*snake_case_ )
_A : Any = param.transpose(0,1 ).contiguous()
_A : Optional[int] = param.view(*snake_case_ )
return param
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# The converted output model.
_A : Any = {}
# old versions did not store training args
_A : str = input_state_dict.get("""args""",snake_case_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_A : Union[str, Any] = ds_args.padded_vocab_size
_A : List[Any] = ds_args.max_position_embeddings
_A : Optional[int] = ds_args.hidden_size
_A : List[Any] = ds_args.num_layers
_A : List[str] = ds_args.num_attention_heads
_A : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_A : Union[str, Any] = config.n_head
# The hidden_size per head.
_A : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_A : Tuple = input_state_dict["""checkpoint_version"""]
else:
_A : Any = 0.0
# The model.
_A : Any = input_state_dict["""model"""]
# The language model.
_A : Tuple = model["""language_model"""]
# The embeddings.
_A : Any = lm["""embedding"""]
# The word embeddings.
_A : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
_A : Tuple = word_embeddings
# The position embeddings.
_A : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_A : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_A : Optional[int] = pos_embeddings
# The transformer.
_A : Any = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_A : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
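    # Example (illustrative): "layers.3.attention.dense.weight" matches with groups
    # ("3", "attention.dense", "weight").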
# The simple map of names for "automated" rules.
_A : Union[str, Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_A : List[str] = layer_re.match(snake_case_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_A : Tuple = int(m.group(1 ) )
# The name of the operation.
_A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
_A : Dict = m.group(3 )
# The name of the layer.
_A : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_A : Union[str, Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_A : List[str] = torch.tril(torch.ones((n_positions, n_positions),dtype=torch.floataa ) ).view(
1,1,snake_case_,snake_case_ )
_A : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
_A : List[str] = torch.tensor(-1e4,dtype=torch.floataa )
_A : Tuple = masked_bias
_A : Tuple = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_A : Tuple = out_val.transpose(0,1 ).contiguous()
# Store.
_A : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_A : List[str] = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Store. No change of shape.
_A : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_A : List[str] = megatron_to_transformers[op_name]
_A : Any = val.transpose(0,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_A : Dict = megatron_to_transformers[op_name]
_A : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_A : Optional[Any] = transformer["""final_layernorm.weight"""]
_A : Dict = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
_A : List[str] = word_embeddings
# It should be done!
return output_state_dict
def lowerCAmelCase_ ( ):
# Create the argument parser.
_A : Any = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""",action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""",type=snake_case_,help="""Path to the checkpoint file (.zip archive or direct .pt file)""",)
parser.add_argument(
"""--config_file""",default="""""",type=snake_case_,help="""An optional config json file describing the pre-trained model.""",)
_A : Optional[int] = parser.parse_args()
# Extract the basename.
_A : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
else:
_A : Tuple = torch.load(args.path_to_checkpoint,map_location="""cpu""" )
_A : Optional[Any] = input_state_dict.get("""args""",snake_case_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_A : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_A : int = """gelu_new"""
else:
_A : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_A : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_A : Any = GPTaConfig(
vocab_size=50257,n_positions=1024,n_embd=1024,n_layer=24,n_head=16,n_inner=4096,activation_function=snake_case_,resid_pdrop=0.1,embd_pdrop=0.1,attn_pdrop=0.1,layer_norm_epsilon=1e-5,initializer_range=0.02,summary_type="""cls_index""",summary_use_proj=snake_case_,summary_activation=snake_case_,summary_proj_to_labels=snake_case_,summary_first_dropout=0.1,scale_attn_weights=snake_case_,use_cache=snake_case_,bos_token_id=50256,eos_token_id=50256,)
else:
_A : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
_A : List[str] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_A : Optional[Any] = convert_megatron_checkpoint(snake_case_,snake_case_,snake_case_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(snake_case_,snake_case_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_A : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_A : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_A : List[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_A : Optional[Any] = """gpt2"""
_A : List[str] = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = type(snake_case_ ).__name__
_A : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(snake_case_ )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(snake_case_ )
# Store the state_dict to file.
_A : Union[str, Any] = os.path.join(snake_case_,"""pytorch_model.bin""" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(snake_case_,snake_case_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 343 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["OwlViTFeatureExtractor"]
_snake_case = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(UpperCamelCase__ )
class lowercase :
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
_A : Optional[Any] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
_A : Dict = titles if not isinstance(_a , _a ) else [titles]
_A : Tuple = texts if not isinstance(_a , _a ) else [texts]
_A : Any = len(_a )
_A : Optional[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
F'''There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.''' )
_A : str = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
_A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A : str = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def a__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> List[DPRSpanPrediction]:
_A : Dict = reader_input["""input_ids"""]
_A , _A , _A : Tuple = reader_output[:3]
_A : List[str] = len(_a )
_A : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
_A : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_A : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A : Tuple = sequence_ids.index(self.pad_token_id )
else:
_A : Tuple = len(_a )
_A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self , _a , _a , _a , _a , ) -> List[DPRSpanPrediction]:
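        # Descriptive note: score every (start, start + length) span by the sum of its start
        # and end logits, sort all candidates by score, then greedily keep the best spans that
        # do not overlap an already chosen interval.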
_A : Tuple = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_A : Tuple = sorted(_a , key=lambda _a : x[1] , reverse=_a )
_A : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
_A : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = READER_PRETRAINED_VOCAB_FILES_MAP
_a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = READER_PRETRAINED_INIT_CONFIGURATION
_a = ["input_ids", "attention_mask"]
| 343 | 0 |
_snake_case = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
    if moles < 0 or kelvin < 0 or volume <= 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
    if moles < 0 or kelvin < 0 or pressure <= 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
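# Worked example (illustrative): 2 mol at 300 K in 0.01 m^3 gives
# pressure = 2 * 300 * 8.314462 / 0.01 ≈ 498_867.7 Pa.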
if __name__ == "__main__":
from doctest import testmod
testmod()
| 363 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
@property
def a__ ( self ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a__ ( self ) -> List[Any]:
_A : int = ort.SessionOptions()
_A : Any = False
return options
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_A : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_A : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_A : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = """A red cat sitting on a park bench"""
_A : Optional[Any] = np.random.RandomState(0 )
_A : Dict = pipe(
prompt=_a , image=_a , mask_image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_a , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 343 | 0 |
def lowerCAmelCase_ ( ):
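    # Project Euler problem 9 (illustrative note): find a * b * c for the unique
    # Pythagorean triplet with a + b + c = 1000, i.e. a^2 + b^2 = c^2 with c = 1000 - a - b.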
return [
a * b * (1000 - a - b)
for a in range(1,999 )
        for b in range(a,999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 364 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ ):
create_state_space_tree(snake_case_,[],0,[0 for i in range(len(snake_case_ ) )] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,):
if index == len(snake_case_ ):
print(snake_case_ )
return
for i in range(len(snake_case_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_A : Optional[Any] = True
create_state_space_tree(snake_case_,snake_case_,index + 1,snake_case_ )
current_sequence.pop()
_A : str = False
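# Illustrative note: for [3, 1, 2, 4] the backtracking above prints all 4! = 24 orderings,
# starting with [3, 1, 2, 4], [3, 1, 4, 2], ..., and for ["A", "B", "C"] all 3! = 6 orderings.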
_snake_case = [3, 1, 2, 4]
generate_all_permutations(sequence)
_snake_case = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 343 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = LDMTextToImagePipeline
_a = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_a = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_a = TEXT_TO_IMAGE_BATCH_PARAMS
_a = False
def a__ ( self ) -> Optional[int]:
torch.manual_seed(0 )
_A : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_A : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
_A : Any = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , )
torch.manual_seed(0 )
_A : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_A : List[Any] = CLIPTextModel(_a )
_A : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vqvae""": vae,
"""bert""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
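# Note (hedged): the slow/nightly tests above compare against pinned reference
# slices and arrays; small numerical drift across GPU generations or PyTorch
# versions can push max_diff past the 1e-3 tolerance even on a healthy install.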
| 365 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = filter(lambda snake_case_ : p.requires_grad,model.parameters() )
_A : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if metric == "rouge2":
_A : Optional[int] = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
_A : Dict = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
_A : List[str] = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
""" function.""" )
_A : Optional[int] = ModelCheckpoint(
dirpath=snake_case_,filename=snake_case_,monitor=f'''val_{metric}''',mode="""max""",save_top_k=3,every_n_epochs=1,)
return checkpoint_callback
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
return EarlyStopping(
monitor=f'''val_{metric}''',mode="""min""" if """loss""" in metric else """max""",patience=snake_case_,verbose=snake_case_,)
class lowercase ( pl.Callback ):
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[Any] = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_a )
@rank_zero_only
def a__ ( self , _a , _a , _a , _a=True ) -> None:
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
_A : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
_A : Dict = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A : List[Any] = od / """test_results.txt"""
_A : List[Any] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A : Optional[int] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
_A : int = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_a )
generations_file.parent.mkdir(exist_ok=_a )
with open(_a , """a+""" ) as writer:
for key in sorted(_a ):
if key in ["log", "progress_bar", "preds"]:
continue
_A : List[Any] = metrics[key]
if isinstance(_a , torch.Tensor ):
_A : str = val.item()
_A : str = F'''{key}: {val:.6f}\n'''
writer.write(_a )
if not save_generations:
return
if "preds" in metrics:
_A : List[Any] = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_a )
@rank_zero_only
def a__ ( self , _a , _a ) -> str:
try:
_A : int = pl_module.model.model.num_parameters()
except AttributeError:
_A : str = pl_module.model.num_parameters()
_A : Optional[int] = count_trainable_parameters(_a )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
def a__ ( self , _a , _a ) -> Optional[int]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_a , _a , """test""" )
@rank_zero_only
def a__ ( self , _a , _a ) -> Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 343 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_snake_case = get_tests_dir("fixtures")
_snake_case = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_snake_case = get_tests_dir("fixtures/dummy-config.json")
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
_A : Optional[int] = 0
def a__ ( self ) -> List[str]:
_A : int = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Tuple:
_A : Dict = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
            _A : Tuple = Wav2Vec2Config()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_A : str = AutoFeatureExtractor.from_pretrained(_a ).to_dict()
config_dict.pop("""feature_extractor_type""" )
            _A : Optional[int] = Wav2Vec2FeatureExtractor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_A : Optional[Any] = AutoFeatureExtractor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_A : List[str] = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Optional[Any]:
_A : int = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> int:
with self.assertRaisesRegex(
_a , """bert-base is not a local folder and is not a valid model identifier""" ):
_A : Optional[Any] = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def a__ ( self ) -> str:
with self.assertRaisesRegex(
_a , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_A : Tuple = AutoFeatureExtractor.from_pretrained(_a , revision="""aaaaaa""" )
def a__ ( self ) -> Any:
with self.assertRaisesRegex(
_a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
_A : int = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def a__ ( self ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_A : Tuple = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_A : Dict = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
_A : Dict = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_a )
_A : List[str] = AutoFeatureExtractor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def a__ ( self ) -> List[Any]:
try:
AutoConfig.register("""custom""" , _a )
AutoFeatureExtractor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoFeatureExtractor.register(_a , _a )
# Now that the config is registered, it can be used as any other config with the auto-API
_A : List[str] = CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_a )
_A : List[Any] = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> List[Any]:
        class NewFeatureExtractor(CustomFeatureExtractor):
            is_local = True
try:
AutoConfig.register("""custom""" , _a )
AutoFeatureExtractor.register(_a , _a )
# If remote code is not set, the default is to use local
_A : List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_A : List[str] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_A : List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(_a , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 366 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    # Gaussian elimination with partial pivoting on the augmented matrix [A | b].
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    # Fit the lowest-degree polynomial through (1, y_1), ..., (n, y_n) via a Vandermonde system.
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    # Sum the first incorrect term (FIT) of each optimum polynomial fit.
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 0 |
def mf_knapsack(i, wt, val, j):
    # memoized (top-down) 0/1 knapsack
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    # bottom-up 0/1 knapsack; returns the optimum and the full dp table
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 367 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 343 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_snake_case = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    # Creates a random int32 tensor of the given shape with values in [0, vocab_size).
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
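# Illustrative usage (hedged): ids_tensor((2, 4), vocab_size=99) returns a
# (2, 4) int32 array of token ids in [0, 98]; random_attention_mask((2, 4))
# additionally forces the last position of every row to 1.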
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = self._get_input_ids_and_config()
_A : int = False
_A : Any = max_length
_A : Optional[Any] = 0
for model_class in self.all_generative_model_classes:
_A : Optional[int] = model_class(_a )
_A : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
_A : Dict = getattr(_a , _a )
_A : List[str] = pt_model_class(_a ).eval()
_A : Tuple = load_flax_weights_in_pytorch_model(_a , flax_model.params )
_A : Optional[int] = flax_model.generate(_a ).sequences
_A : str = pt_model.generate(torch.tensor(_a , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_A : Dict = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = self._get_input_ids_and_config()
_A : Dict = False
_A : Any = max_length
for model_class in self.all_generative_model_classes:
_A : Optional[Any] = model_class(_a )
_A : Optional[Any] = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : List[Any] = jit(model.generate )
_A : Union[str, Any] = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = self._get_input_ids_and_config()
_A : List[str] = True
_A : List[Any] = max_length
for model_class in self.all_generative_model_classes:
_A : Union[str, Any] = model_class(_a )
_A : Dict = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : str = jit(model.generate )
_A : int = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> Optional[int]:
_A : Optional[int] = self._get_input_ids_and_config()
_A : List[Any] = False
_A : str = max_length
_A : List[Any] = 2
for model_class in self.all_generative_model_classes:
_A : Dict = model_class(_a )
_A : Dict = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : Dict = jit(model.generate )
_A : List[Any] = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> List[Any]:
_A : List[str] = self._get_input_ids_and_config()
_A : int = False
_A : Optional[Any] = max_length
_A : Optional[Any] = 2
_A : Optional[Any] = 2
for model_class in self.all_generative_model_classes:
_A : Dict = model_class(_a )
_A : Any = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def a__ ( self ) -> List[str]:
_A : Optional[Any] = self._get_input_ids_and_config()
_A : List[Any] = True
_A : List[Any] = max_length
_A : List[str] = 0.8
_A : Optional[Any] = 10
_A : List[str] = 0.3
_A : Optional[Any] = 1
_A : Any = 8
_A : Tuple = 9
for model_class in self.all_generative_model_classes:
_A : Tuple = model_class(_a )
_A : int = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : Optional[Any] = jit(model.generate )
_A : str = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._get_input_ids_and_config()
_A : Union[str, Any] = max_length
_A : List[Any] = 1
_A : Optional[Any] = 8
_A : Any = 9
for model_class in self.all_generative_model_classes:
_A : Any = model_class(_a )
_A : int = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : int = jit(model.generate )
_A : List[Any] = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> List[str]:
_A : str = self._get_input_ids_and_config()
_A : Any = max_length
_A : Dict = 2
_A : Tuple = 1
_A : List[Any] = 8
_A : Tuple = 9
for model_class in self.all_generative_model_classes:
_A : int = model_class(_a )
_A : Tuple = model.generate(_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : int = jit(model.generate )
_A : int = jit_generate(_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> Any:
_A : int = self._get_input_ids_and_config()
# pad attention mask on the left
_A : List[Any] = attention_mask.at[(0, 0)].set(0 )
_A : Dict = False
_A : int = max_length
for model_class in self.all_generative_model_classes:
_A : Optional[int] = model_class(_a )
_A : List[str] = model.generate(_a , attention_mask=_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : List[str] = jit(model.generate )
_A : str = jit_generate(_a , attention_mask=_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
_A : List[str] = attention_mask.at[(0, 0)].set(0 )
_A : Optional[Any] = True
_A : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
_A : Dict = model_class(_a )
_A : Tuple = model.generate(_a , attention_mask=_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : List[Any] = jit(model.generate )
_A : Any = jit_generate(_a , attention_mask=_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__ ( self ) -> str:
_A : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_A : List[str] = attention_mask.at[(0, 0)].set(0 )
_A : str = 2
_A : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
_A : Optional[Any] = model_class(_a )
_A : Optional[Any] = model.generate(_a , attention_mask=_a ).sequences
self.assertEqual(generation_outputs.shape[-1] , _a )
_A : Any = jit(model.generate )
_A : Any = jit_generate(_a , attention_mask=_a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
_A : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
_A : List[Any] = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_A : Tuple = """Hello world"""
_A : List[Any] = tokenizer(_a , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_a , """do_samples""" ):
model.generate(_a , do_samples=_a )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_a , """foo""" ):
_A : Union[str, Any] = {"""foo""": """bar"""}
model.generate(_a , **_a )
| 368 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix):
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 and 3x3 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 343 | 0 |
"""simple docstring"""
import numpy as np
class Cell:
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        # 8-connected neighbourhood offsets
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
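# Note (hedged): n.h above is the *squared* Euclidean distance, an inadmissible
# heuristic, so this is closer to greedy best-first search than textbook A*;
# on the empty 5x5 grid it should still print the diagonal path
# (0, 0) -> (1, 1) -> (2, 2) -> (3, 3) -> (4, 4).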
| 369 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self , _a = 32 , _a = 64 , _a = 20 , _a = 768 , _a=77 , _a=4 , _a = 0.0 , _a = "silu" , _a = None , _a = None , _a = "linear" , _a = "prd" , _a = None , _a = None , _a = None , ) -> Any:
super().__init__()
_A : int = num_attention_heads
_A : Union[str, Any] = attention_head_dim
_A : Tuple = num_attention_heads * attention_head_dim
_A : Any = additional_embeddings
_A : Any = time_embed_dim or inner_dim
_A : List[str] = embedding_proj_dim or embedding_dim
_A : Optional[int] = clip_embed_dim or embedding_dim
_A : Union[str, Any] = Timesteps(_a , _a , 0 )
_A : str = TimestepEmbedding(_a , _a , out_dim=_a , act_fn=_a )
_A : Dict = nn.Linear(_a , _a )
if embedding_proj_norm_type is None:
_A : int = None
elif embedding_proj_norm_type == "layer":
_A : Optional[Any] = nn.LayerNorm(_a )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
_A : Optional[Any] = nn.Linear(_a , _a )
if encoder_hid_proj_type is None:
_A : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
_A : Tuple = nn.Linear(_a , _a )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
_A : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _a ) )
if added_emb_type == "prd":
_A : str = nn.Parameter(torch.zeros(1 , 1 , _a ) )
elif added_emb_type is None:
_A : Union[str, Any] = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
_A : int = nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , activation_fn="""gelu""" , attention_bias=_a , )
for d in range(_a )
] )
if norm_in_type == "layer":
_A : Union[str, Any] = nn.LayerNorm(_a )
elif norm_in_type is None:
_A : Tuple = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
_A : int = nn.LayerNorm(_a )
_A : str = nn.Linear(_a , _a )
_A : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
_A : Optional[int] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _a , persistent=_a )
_A : Tuple = nn.Parameter(torch.zeros(1 , _a ) )
_A : Dict = nn.Parameter(torch.zeros(1 , _a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
_A : List[str] = {}
def fn_recursive_add_processors(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
_A : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def a__ ( self , _a ) -> List[str]:
_A : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def a__ ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
def a__ ( self , _a , _a , _a , _a = None , _a = None , _a = True , ) -> Optional[Any]:
_A : Tuple = hidden_states.shape[0]
_A : List[Any] = timestep
if not torch.is_tensor(_a ):
_A : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
_A : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_A : Optional[int] = timesteps * torch.ones(_a , dtype=timesteps.dtype , device=timesteps.device )
_A : Dict = self.time_proj(_a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_A : Tuple = timesteps_projected.to(dtype=self.dtype )
_A : List[Any] = self.time_embedding(_a )
if self.embedding_proj_norm is not None:
_A : Dict = self.embedding_proj_norm(_a )
_A : List[Any] = self.embedding_proj(_a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_A : List[Any] = self.encoder_hidden_states_proj(_a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
_A : Optional[int] = self.proj_in(_a )
_A : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
_A : Union[str, Any] = []
_A : List[str] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_A : List[str] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_A : List[str] = hidden_states[:, None, :]
_A : Dict = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_A : Optional[int] = self.prd_embedding.to(hidden_states.dtype ).expand(_a , -1 , -1 )
additional_embeds.append(_a )
_A : str = torch.cat(
_a , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_A : Dict = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_A : Union[str, Any] = F.pad(
_a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_A : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_A : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
_A : List[Any] = F.pad(_a , (0, self.additional_embeddings) , value=0.0 )
_A : Optional[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_A : int = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_A : str = self.norm_in(_a )
for block in self.transformer_blocks:
_A : List[Any] = block(_a , attention_mask=_a )
_A : Any = self.norm_out(_a )
if self.prd_embedding is not None:
_A : int = hidden_states[:, -1]
else:
_A : Any = hidden_states[:, additional_embeddings_len:]
_A : Union[str, Any] = self.proj_to_clip_embeddings(_a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_a )
def a__ ( self , _a ) -> Tuple:
_A : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 343 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
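# Illustrative usage (hedged): constructing the shim should emit the
# deprecation warning while behaving exactly like FlavaImageProcessor.
#
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     FlavaFeatureExtractor()
#     assert any(issubclass(c.category, FutureWarning) for c in caught)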
| 370 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Any = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_A : Union[str, Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : int = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[str] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : int = None
if token is not None:
_A : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : str = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
_A : Optional[Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_A : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[Any] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Dict = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Tuple = requests.get(snake_case_,headers=snake_case_,allow_redirects=snake_case_ )
_A : Tuple = result.headers["""Location"""]
_A : Union[str, Any] = requests.get(snake_case_,allow_redirects=snake_case_ )
_A : Dict = os.path.join(snake_case_,f'''{artifact_name}.zip''' )
with open(snake_case_,"""wb""" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : List[str] = []
_A : int = []
_A : Tuple = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
_A : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_A : Dict = line[: line.index(""": """ )]
_A : Dict = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_A : List[str] = line[len("""FAILED """ ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
_A : Optional[int] = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` '''
f'''and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
_A : Any = None
if job_name and job_links:
_A : Dict = job_links.get(snake_case_,snake_case_ )
# A list with elements of the form (line of error, error, failed test)
_A : Optional[int] = [x + [y] + [job_link] for x, y in zip(snake_case_,snake_case_ )]
return result
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = []
_A : Optional[int] = [os.path.join(snake_case_,snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_,job_links=snake_case_ ) )
return errors
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = Counter()
counter.update([x[1] for x in logs] )
_A : Tuple = counter.most_common()
_A : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_A : str = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_A : Dict = test.split("""/""" )[2]
else:
_A : str = None
return test
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : str = [(x[0], x[1], get_model(x[2] )) for x in logs]
_A : Union[str, Any] = [x for x in logs if x[2] is not None]
_A : Optional[Any] = {x[2] for x in logs}
_A : List[Any] = {}
for test in tests:
_A : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_A : Union[str, Any] = counter.most_common()
_A : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_A : str = sum(error_counts.values() )
if n_errors > 0:
_A : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = """| no. | error | status |"""
_A : List[Any] = """|-:|:-|:-|"""
_A : List[Any] = [header, sep]
for error in reduced_by_error:
_A : List[str] = reduced_by_error[error]["""count"""]
_A : List[Any] = f'''| {count} | {error[:100]} | |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = """| model | no. of errors | major error | count |"""
_A : Optional[Any] = """|-:|-:|-:|-:|"""
_A : Union[str, Any] = [header, sep]
for model in reduced_by_model:
_A : Dict = reduced_by_model[model]["""count"""]
_A , _A : str = list(reduced_by_model[model]["""errors"""].items() )[0]
_A : Union[str, Any] = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_snake_case = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_snake_case = get_job_links(args.workflow_run_id, token=args.token)
_snake_case = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_snake_case = k.find(" / ")
_snake_case = k[index + len(" / ") :]
_snake_case = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_snake_case = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_snake_case = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_snake_case = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_snake_case = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_snake_case = reduce_by_error(errors)
_snake_case = reduce_by_model(errors)
_snake_case = make_github_table(reduced_by_error)
_snake_case = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 343 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowercase ( TokenizerTesterMixin, unittest.TestCase ):
_a = BertJapaneseTokenizer
_a = False
_a = True
def a__ ( self ) -> Any:
super().setUp()
_A : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
_A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = """こんにちは、世界。 \nこんばんは、世界。"""
_A : Optional[Any] = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def a__ ( self , _a ) -> Union[str, Any]:
_A : List[str] = self.get_input_output_texts(_a )
_A : Optional[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_A : int = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
return text, ids
def a__ ( self ) -> int:
pass # TODO add if relevant
def a__ ( self ) -> List[str]:
pass # TODO add if relevant
def a__ ( self ) -> Optional[Any]:
pass # TODO add if relevant
def a__ ( self ) -> Optional[int]:
_A : int = self.tokenizer_class(self.vocab_file )
_A : List[str] = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(_a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(_a )
_A : Tuple = """こんにちは、世界。\nこんばんは、世界。"""
_A : Union[str, Any] = tokenizer.tokenize(_a )
self.assertListEqual(_a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_A : str = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(_a , """wb""" ) as handle:
pickle.dump(_a , _a )
with open(_a , """rb""" ) as handle:
_A : List[str] = pickle.load(_a )
_A : str = tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[Any]:
_A : str = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def a__ ( self ) -> Tuple:
try:
_A : str = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def a__ ( self ) -> List[Any]:
try:
_A : Any = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def a__ ( self ) -> Optional[int]:
_A : List[Any] = MecabTokenizer(do_lower_case=_a , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def a__ ( self ) -> Optional[Any]:
try:
_A : List[Any] = MecabTokenizer(
do_lower_case=_a , normalize_text=_a , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def a__ ( self ) -> Optional[Any]:
_A : Dict = MecabTokenizer(normalize_text=_a , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def a__ ( self ) -> List[str]:
_A : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(_a )
_A : List[str] = """こんにちは、世界。\nこんばんは、世界。"""
_A : Dict = tokenizer.tokenize(_a )
self.assertListEqual(_a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_A : List[str] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(_a , """wb""" ) as handle:
pickle.dump(_a , _a )
with open(_a , """rb""" ) as handle:
_A : Any = pickle.load(_a )
_A : int = tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
@require_sudachi
def a__ ( self ) -> Optional[int]:
_A : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def a__ ( self ) -> List[Any]:
_A : int = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def a__ ( self ) -> Tuple:
_A : str = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def a__ ( self ) -> Optional[Any]:
_A : str = SudachiTokenizer(do_lower_case=_a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def a__ ( self ) -> Optional[int]:
_A : Tuple = SudachiTokenizer(normalize_text=_a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def a__ ( self ) -> List[str]:
_A : List[Any] = SudachiTokenizer(trim_whitespace=_a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def a__ ( self ) -> Tuple:
_A : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(_a )
_A : List[Any] = """こんにちは、世界。\nこんばんは、世界。"""
_A : str = tokenizer.tokenize(_a )
self.assertListEqual(_a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_A : Optional[Any] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(_a , """wb""" ) as handle:
pickle.dump(_a , _a )
with open(_a , """rb""" ) as handle:
_A : Any = pickle.load(_a )
_A : Dict = tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
@require_jumanpp
def a__ ( self ) -> Optional[Any]:
_A : List[str] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = JumanppTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def a__ ( self ) -> List[str]:
_A : str = JumanppTokenizer(normalize_text=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def a__ ( self ) -> Optional[Any]:
_A : Union[str, Any] = JumanppTokenizer(trim_whitespace=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def a__ ( self ) -> str:
_A : Tuple = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def a__ ( self ) -> Optional[Any]:
_A : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
_A : int = {}
for i, token in enumerate(_a ):
_A : List[Any] = i
_A : Any = WordpieceTokenizer(vocab=_a , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def a__ ( self ) -> List[Any]:
_A : Tuple = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
_A : str = tokenizer.subword_tokenizer
_A : Any = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(_a , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
_A : Dict = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(_a , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def a__ ( self ) -> Dict:
_A : str = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
_A : Optional[Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=_a )
_A : str = tokenizer.encode("""どういたしまして。""" , add_special_tokens=_a )
_A : List[Any] = tokenizer.build_inputs_with_special_tokens(_a )
_A : Tuple = tokenizer.build_inputs_with_special_tokens(_a , _a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
"""simple docstring"""
_a = BertJapaneseTokenizer
_a = False
def a__ ( self ) -> Any:
super().setUp()
_A : List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def a__ ( self , **_a ) -> Optional[int]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **_a )
def a__ ( self , _a ) -> int:
_A : str = """こんにちは、世界。 \nこんばんは、世界。"""
_A : int = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def a__ ( self ) -> Tuple:
pass # TODO add if relevant
def a__ ( self ) -> Tuple:
pass # TODO add if relevant
def a__ ( self ) -> str:
pass # TODO add if relevant
def a__ ( self ) -> Any:
_A : List[str] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
_A : List[str] = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
_a , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def a__ ( self ) -> Tuple:
_A : List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
_A : Dict = {}
for i, token in enumerate(_a ):
_A : Tuple = i
_A : int = CharacterTokenizer(vocab=_a , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def a__ ( self ) -> Dict:
_A : int = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
_A : int = tokenizer.encode("""ありがとう。""" , add_special_tokens=_a )
_A : Optional[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=_a )
_A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_a )
_A : int = tokenizer.build_inputs_with_special_tokens(_a , _a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = """cl-tohoku/bert-base-japanese"""
_A : List[str] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[int]:
_A : Dict = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(_a )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
_A : str = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(_a )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 371 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
debug_launcher(test_script.main )
def a__ ( self ) -> Any:
debug_launcher(test_ops.main )
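# debug_launcher executes the wrapped main() in spawned CPU worker processes,
# giving a lightweight distributed smoke test without GPUs. A sketch of
# widening the world size, assuming accelerate's num_processes keyword:
#   debug_launcher(test_script.main, num_processes=2)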
| 343 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ ):
_A : str = DPTConfig()
if "large" in checkpoint_url:
_A : List[Any] = 1024
_A : Union[str, Any] = 4096
_A : Tuple = 24
_A : Tuple = 16
_A : int = [5, 11, 17, 23]
_A : List[str] = [256, 512, 1024, 1024]
_A : Optional[Any] = (1, 384, 384)
if "ade" in checkpoint_url:
_A : Optional[Any] = True
_A : Union[str, Any] = 150
_A : Dict = """huggingface/label-files"""
_A : Any = """ade20k-id2label.json"""
_A : Union[str, Any] = json.load(open(cached_download(hf_hub_url(snake_case_,snake_case_,repo_type="""dataset""" ) ),"""r""" ) )
_A : List[str] = {int(snake_case_ ): v for k, v in idalabel.items()}
_A : Optional[int] = idalabel
_A : int = {v: k for k, v in idalabel.items()}
_A : int = [1, 150, 480, 480]
return config, expected_shape
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_A : Dict = name.replace("""pretrained.model""","""dpt.encoder""" )
if "pretrained.model" in name:
_A : Any = name.replace("""pretrained.model""","""dpt.embeddings""" )
if "patch_embed" in name:
_A : List[Any] = name.replace("""patch_embed""","""patch_embeddings""" )
if "pos_embed" in name:
_A : str = name.replace("""pos_embed""","""position_embeddings""" )
if "attn.proj" in name:
_A : Optional[int] = name.replace("""attn.proj""","""attention.output.dense""" )
if "proj" in name and "project" not in name:
_A : int = name.replace("""proj""","""projection""" )
if "blocks" in name:
_A : str = name.replace("""blocks""","""layer""" )
if "mlp.fc1" in name:
_A : int = name.replace("""mlp.fc1""","""intermediate.dense""" )
if "mlp.fc2" in name:
_A : Any = name.replace("""mlp.fc2""","""output.dense""" )
if "norm1" in name:
_A : Tuple = name.replace("""norm1""","""layernorm_before""" )
if "norm2" in name:
_A : Optional[Any] = name.replace("""norm2""","""layernorm_after""" )
if "scratch.output_conv" in name:
_A : List[str] = name.replace("""scratch.output_conv""","""head""" )
if "scratch" in name:
_A : Dict = name.replace("""scratch""","""neck""" )
if "layer1_rn" in name:
_A : Dict = name.replace("""layer1_rn""","""convs.0""" )
if "layer2_rn" in name:
_A : List[Any] = name.replace("""layer2_rn""","""convs.1""" )
if "layer3_rn" in name:
_A : str = name.replace("""layer3_rn""","""convs.2""" )
if "layer4_rn" in name:
_A : Any = name.replace("""layer4_rn""","""convs.3""" )
if "refinenet" in name:
_A : int = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_A : List[str] = name.replace(f'''refinenet{layer_idx}''',f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
_A : Tuple = name.replace("""out_conv""","""projection""" )
if "resConfUnit1" in name:
_A : Optional[Any] = name.replace("""resConfUnit1""","""residual_layer1""" )
if "resConfUnit2" in name:
_A : List[str] = name.replace("""resConfUnit2""","""residual_layer2""" )
if "conv1" in name:
_A : Union[str, Any] = name.replace("""conv1""","""convolution1""" )
if "conv2" in name:
_A : List[Any] = name.replace("""conv2""","""convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_A : List[Any] = name.replace("""pretrained.act_postprocess1.0.project.0""","""neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
_A : Optional[int] = name.replace("""pretrained.act_postprocess2.0.project.0""","""neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
_A : Union[str, Any] = name.replace("""pretrained.act_postprocess3.0.project.0""","""neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
_A : Dict = name.replace("""pretrained.act_postprocess4.0.project.0""","""neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_A : int = name.replace("""pretrained.act_postprocess1.3""","""neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
_A : Union[str, Any] = name.replace("""pretrained.act_postprocess1.4""","""neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
_A : str = name.replace("""pretrained.act_postprocess2.3""","""neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
_A : Any = name.replace("""pretrained.act_postprocess2.4""","""neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
_A : List[str] = name.replace("""pretrained.act_postprocess3.3""","""neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
_A : str = name.replace("""pretrained.act_postprocess4.3""","""neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
_A : List[Any] = name.replace("""pretrained.act_postprocess4.4""","""neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
_A : int = name.replace("""pretrained""","""dpt""" )
if "bn" in name:
_A : Any = name.replace("""bn""","""batch_norm""" )
if "head" in name:
_A : List[str] = name.replace("""head""","""head.head""" )
if "encoder.norm" in name:
_A : int = name.replace("""encoder.norm""","""layernorm""" )
if "auxlayer" in name:
_A : Any = name.replace("""auxlayer""","""auxiliary_head.head""" )
return name
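# Example trace through the renaming rules above (derived from the rules, not
# an additional mapping): "pretrained.model.blocks.0.attn.proj.weight"
#   -> "dpt.encoder.blocks.0.attn.proj.weight"               (pretrained.model -> dpt.encoder)
#   -> "dpt.encoder.blocks.0.attention.output.dense.weight"  (attn.proj rule)
#   -> "dpt.encoder.layer.0.attention.output.dense.weight"   (blocks -> layer)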
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_A : Optional[Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
_A : Any = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_A : Optional[int] = in_proj_weight[: config.hidden_size, :]
_A : List[str] = in_proj_bias[: config.hidden_size]
_A : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_A : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_A : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
_A : Optional[int] = in_proj_bias[-config.hidden_size :]
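# Note on the slicing above: the original checkpoint fuses query, key and
# value into one (3 * hidden_size, hidden_size) projection; the three
# hidden_size-row slices split it back into the separate q/k/v weights and
# biases the converted attention layers expect.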
def lowerCAmelCase_ ( ):
_A : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_A : Optional[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = get_dpt_config(snake_case_ )
# load original state_dict from URL
_A : Tuple = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(snake_case_ )
# rename keys
for key in state_dict.copy().keys():
_A : Dict = state_dict.pop(snake_case_ )
_A : List[Any] = val
# read in qkv matrices
read_in_q_k_v(snake_case_,snake_case_ )
# load HuggingFace model
_A : Optional[int] = DPTForSemanticSegmentation(snake_case_ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
# Check outputs on an image
_A : Optional[Any] = 480 if """ade""" in checkpoint_url else 384
_A : str = DPTImageProcessor(size=snake_case_ )
_A : Any = prepare_img()
_A : Union[str, Any] = image_processor(snake_case_,return_tensors="""pt""" )
# forward pass
_A : List[str] = model(**snake_case_ ).logits if """ade""" in checkpoint_url else model(**snake_case_ ).predicted_depth
# Assert logits
_A : Optional[Any] = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
if "ade" in checkpoint_url:
_A : List[str] = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
assert outputs.shape == torch.Size(snake_case_ )
assert (
torch.allclose(outputs[0, 0, :3, :3],snake_case_,atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3],snake_case_ )
)
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(snake_case_,snake_case_ ),organization="""nielsr""",commit_message="""Add model""",use_temp_dir=snake_case_,)
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case_,snake_case_ ),organization="""nielsr""",commit_message="""Add image processor""",use_temp_dir=snake_case_,)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
_snake_case = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
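# Example invocation (a sketch: the URL is this script's own default large
# MiDaS checkpoint, while the script filename and output path are hypothetical):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large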
| 350 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = "resnet"
_a = ["basic", "bottleneck"]
def __init__( self , _a=3 , _a=64 , _a=[256, 512, 1024, 2048] , _a=[3, 4, 6, 3] , _a="bottleneck" , _a="relu" , _a=False , _a=None , _a=None , **_a , ) -> int:
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
_A : Optional[Any] = num_channels
_A : List[Any] = embedding_size
_A : int = hidden_sizes
_A : Union[str, Any] = depths
_A : Optional[int] = layer_type
_A : Any = hidden_act
_A : List[Any] = downsample_in_first_stage
_A : int = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
        _A , _A = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class lowercase ( UpperCamelCase__ ):
_a = version.parse("1.11" )
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a__ ( self ) -> float:
return 1e-3
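# Export sketch (assuming the standard `python -m transformers.onnx` CLI that
# consumes an OnnxConfig like the one above; the 1e-3 property corresponds to
# the absolute tolerance used when validating the exported graph):
#   python -m transformers.onnx --model=microsoft/resnet-50 resnet_onnx/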
| 343 | 0 |
import numpy as np
def lowerCAmelCase_ ( snake_case_ ):
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_ ( snake_case_ ):
return vector * sigmoid(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
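# Minimal self-contained sketch of the two activations above — the logistic
# sigmoid and the sigmoid linear unit (SiLU/swish) — written with explicit
# names because the transformed definitions in this file shadow one another:
#   def sigmoid(vector):
#       return 1 / (1 + np.exp(-vector))
#   def sigmoid_linear_unit(vector):
#       return vector * sigmoid(vector)
#   sigmoid_linear_unit(np.array([0.0, 1.0]))  # -> array([0., 0.73105858])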
| 351 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# Load checkpoint
_A : Optional[int] = torch.load(snake_case_,map_location="""cpu""" )
_A : Any = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
_A : Any = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_A : Tuple = v
else:
_A : Dict = v
_A : Optional[Any] = chkpt["""params"""]
_A : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(snake_case_,(torch.FloatTensor, numpy.ndarray) )}
_A : str = chkpt["""dico_word2id"""]
_A : Optional[Any] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""","""""" ): i for s, i in vocab.items()}
# Save pytorch-model
_A : Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_A : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_A : Optional[int] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(snake_case_,snake_case_ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
print(f'''Save vocab file to {pytorch_config_dump_path}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(snake_case_,indent=2 ) + """\n""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
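# Example invocation (a sketch; both paths are hypothetical placeholders):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-converted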
| 343 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = AudioLDMPipeline
_a = TEXT_TO_AUDIO_PARAMS
_a = TEXT_TO_AUDIO_BATCH_PARAMS
_a = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def a__ ( self ):
torch.manual_seed(0 )
_A : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_a , )
_A : Tuple = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
_A : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_A : Any = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_A : Optional[int] = ClapTextModelWithProjection(_a )
_A : Union[str, Any] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
_A : int = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_a , )
_A : Optional[int] = SpeechTaHifiGan(_a )
_A : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def a__ ( self , _a , _a=0 ):
if str(_a ).startswith("""mps""" ):
_A : int = torch.manual_seed(_a )
else:
_A : List[Any] = torch.Generator(device=_a ).manual_seed(_a )
_A : str = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def a__ ( self ):
_A : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Tuple = self.get_dummy_components()
_A : List[Any] = AudioLDMPipeline(**_a )
_A : Any = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : List[str] = audioldm_pipe(**_a )
_A : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_a ) == 256
_A : Tuple = audio[:10]
_A : Union[str, Any] = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a__ ( self ):
_A : List[str] = self.get_dummy_components()
_A : int = AudioLDMPipeline(**_a )
_A : List[Any] = audioldm_pipe.to(_a )
_A : Any = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = self.get_dummy_inputs(_a )
_A : Any = 3 * [inputs["""prompt"""]]
# forward
_A : Tuple = audioldm_pipe(**_a )
_A : Optional[int] = output.audios[0]
_A : Union[str, Any] = self.get_dummy_inputs(_a )
_A : Optional[int] = 3 * [inputs.pop("""prompt""" )]
_A : Tuple = audioldm_pipe.tokenizer(
_a , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , )
_A : List[Any] = text_inputs["""input_ids"""].to(_a )
_A : Dict = audioldm_pipe.text_encoder(
_a , )
_A : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_A : Optional[int] = F.normalize(_a , dim=-1 )
_A : List[Any] = prompt_embeds
# forward
_A : Dict = audioldm_pipe(**_a )
_A : Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a__ ( self ):
_A : int = self.get_dummy_components()
_A : Union[str, Any] = AudioLDMPipeline(**_a )
_A : int = audioldm_pipe.to(_a )
_A : Optional[int] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : str = 3 * ["""this is a negative prompt"""]
_A : List[str] = negative_prompt
_A : Any = 3 * [inputs["""prompt"""]]
# forward
_A : Union[str, Any] = audioldm_pipe(**_a )
_A : Union[str, Any] = output.audios[0]
_A : str = self.get_dummy_inputs(_a )
_A : Dict = 3 * [inputs.pop("""prompt""" )]
_A : Any = []
for p in [prompt, negative_prompt]:
_A : Optional[int] = audioldm_pipe.tokenizer(
_a , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , )
_A : Dict = text_inputs["""input_ids"""].to(_a )
_A : str = audioldm_pipe.text_encoder(
_a , )
_A : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_A : int = F.normalize(_a , dim=-1 )
embeds.append(_a )
_A : str = embeds
# forward
_A : Dict = audioldm_pipe(**_a )
_A : Dict = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a__ ( self ):
_A : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Tuple = self.get_dummy_components()
_A : Tuple = PNDMScheduler(skip_prk_steps=_a )
_A : List[Any] = AudioLDMPipeline(**_a )
_A : int = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : List[str] = """egg cracking"""
_A : Dict = audioldm_pipe(**_a , negative_prompt=_a )
_A : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_a ) == 256
_A : Tuple = audio[:10]
_A : Union[str, Any] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a__ ( self ):
_A : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : List[str] = self.get_dummy_components()
_A : Any = PNDMScheduler(skip_prk_steps=_a )
_A : Tuple = AudioLDMPipeline(**_a )
_A : List[Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : List[str] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
_A : Tuple = audioldm_pipe(_a , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_A : int = 2
_A : str = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_A : Union[str, Any] = 2
_A : str = audioldm_pipe(_a , num_inference_steps=2 , num_waveforms_per_prompt=_a ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_A : List[Any] = 2
_A : Optional[int] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_a ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def a__ ( self ):
_A : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Optional[Any] = self.get_dummy_components()
_A : List[str] = AudioLDMPipeline(**_a )
_A : Optional[Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : Tuple = audioldm_pipe(audio_length_in_s=0.016 , **_a )
_A : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(_a ) / vocoder_sampling_rate == 0.016
_A : Union[str, Any] = audioldm_pipe(audio_length_in_s=0.032 , **_a )
_A : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_a ) / vocoder_sampling_rate == 0.032
def a__ ( self ):
_A : List[Any] = self.get_dummy_components()
_A : Optional[Any] = AudioLDMPipeline(**_a )
_A : Union[str, Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : str = ["""hey"""]
_A : Union[str, Any] = audioldm_pipe(_a , num_inference_steps=1 )
_A : Optional[int] = output.audios.shape
assert audio_shape == (1, 256)
_A : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_A : Dict = SpeechTaHifiGan(_a ).to(_a )
_A : Tuple = audioldm_pipe(_a , num_inference_steps=1 )
_A : Tuple = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def a__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_a )
def a__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_a )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_a )
@slow
class lowercase ( unittest.TestCase ):
def a__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ):
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Any = np.random.RandomState(_a ).standard_normal((1, 8, 128, 16) )
_A : Dict = torch.from_numpy(_a ).to(device=_a , dtype=_a )
_A : List[str] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def a__ ( self ):
_A : List[Any] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_A : Tuple = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Union[str, Any] = self.get_inputs(_a )
_A : Dict = 25
_A : Union[str, Any] = audioldm_pipe(**_a ).audios[0]
assert audio.ndim == 1
assert len(_a ) == 8_1920
_A : int = audio[7_7230:7_7240]
_A : Dict = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_A : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def a__ ( self ):
_A : Dict = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_A : Union[str, Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_A : List[Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_inputs(_a )
_A : Optional[int] = audioldm_pipe(**_a ).audios[0]
assert audio.ndim == 1
assert len(_a ) == 8_1920
_A : Any = audio[2_7780:2_7790]
_A : int = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_A : Optional[Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
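# The @slow-decorated class above downloads the real cvssp/audioldm weights,
# so its cases only run when explicitly opted in (standard convention in this
# test suite; the test-file path below is hypothetical):
#   RUN_SLOW=1 pytest tests/pipelines/audioldm/test_audioldm.py -x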
| 352 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( UpperCamelCase__ ):
_a = ["image_processor", "tokenizer"]
_a = "BlipImageProcessor"
_a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a , _a ) -> Any:
_A : List[Any] = False
super().__init__(_a , _a )
_A : Optional[int] = self.image_processor
def __call__( self , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_A : Dict = self.tokenizer
_A : Dict = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
return text_encoding
# add pixel_values
_A : int = self.image_processor(_a , return_tensors=_a )
if text is not None:
_A : List[Any] = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
else:
_A : int = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def a__ ( self , *_a , **_a ) -> Any:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.decode(*_a , **_a )
@property
def a__ ( self ) -> Optional[Any]:
_A : Any = self.tokenizer.model_input_names
_A : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
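# Usage sketch for the processor above (checkpoint name is illustrative):
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")
# Image-only and text-only calls are also valid; passing neither raises the
# ValueError guarded at the top of __call__.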
| 343 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ) -> List[Any]:
_A : Tuple = parent
_A : Any = batch_size
_A : int = image_size
_A : Tuple = num_channels
_A : List[Any] = num_stages
_A : Any = hidden_sizes
_A : Union[str, Any] = depths
_A : Union[str, Any] = is_training
_A : Tuple = use_labels
_A : Optional[Any] = intermediate_size
_A : Union[str, Any] = hidden_act
_A : Any = num_labels
_A : List[str] = initializer_range
_A : str = out_features
_A : int = out_indices
_A : List[Any] = scope
def a__ ( self ) -> str:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : str = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.num_labels )
_A : str = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> int:
_A : int = ConvNextModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> List[Any]:
_A : Union[str, Any] = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
_A : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> str:
_A : List[str] = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Optional[Any] = None
_A : str = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> int:
_A : int = self.prepare_config_and_inputs()
_A : List[Any] = config_and_inputs
_A : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : int = ConvNextModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Union[str, Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> int:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Optional[int]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : List[str] = self.default_image_processor
_A : int = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Dict = model(**_a )
# verify the logits
_A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Any = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> List[str]:
_A : Optional[int] = ConvNextModelTester(self )
| 353 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = 0
if start < end:
_A : Tuple = randint(snake_case_,snake_case_ )
_A : Any = a[end]
_A : int = a[pivot]
_A : int = temp
_A , _A : List[Any] = _in_place_partition(snake_case_,snake_case_,snake_case_ )
count += _in_place_quick_sort(snake_case_,snake_case_,p - 1 )
count += _in_place_quick_sort(snake_case_,p + 1,snake_case_ )
return count
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = 0
_A : List[str] = randint(snake_case_,snake_case_ )
_A : Union[str, Any] = a[end]
_A : List[str] = a[pivot]
_A : List[Any] = temp
_A : List[str] = start - 1
for index in range(snake_case_,snake_case_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_A : Union[str, Any] = new_pivot_index + 1
_A : List[Any] = a[new_pivot_index]
_A : Optional[int] = a[index]
_A : List[Any] = temp
_A : Optional[Any] = a[new_pivot_index + 1]
_A : Any = a[end]
_A : Dict = temp
return new_pivot_index + 1, count
_snake_case = TemporaryFile()
_snake_case = 100  # 100 elements are to be sorted
_snake_case , _snake_case = 0, 1 # mean and standard deviation
_snake_case = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
_snake_case = np.load(outfile)
_snake_case = len(M) - 1
_snake_case = _in_place_quick_sort(M, 0, r)
print(
    "Number of comparisons for 100 elements selected from a standard normal"
    " distribution is:"
)
print(z)
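# Sanity-check sketch on a tiny fixed input (using the pre-transform helper
# names that the recursive calls above reference):
#   data = np.array([3, 1, 2])
#   _in_place_quick_sort(data, 0, len(data) - 1)  # data becomes [1, 2, 3]
# The returned value counts element-vs-pivot comparisons, which averages
# O(n log n) with the random pivot choice used here.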
| 343 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
_snake_case = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
_snake_case = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
_snake_case = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
_snake_case = OrderedDict(
[
        # Model for Image Classification mapping
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
_snake_case = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
_snake_case = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
_snake_case = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
_snake_case = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
_snake_case = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
_snake_case = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
_snake_case = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
_snake_case = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
_snake_case = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_snake_case = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_MAPPING
_snake_case = auto_class_update(FlaxAutoModel)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_snake_case = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class lowercase ( _BaseAutoModelClass ):
_a = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_snake_case = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
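# Hedged usage sketch: these auto classes expose the standard ``from_pretrained``
# entry point and dispatch on the config class via the lazy mappings above.
# "t5-base" is the checkpoint the seq2seq class documents as its example:
#
#     model = FlaxAutoModelForSeqaSeqLM.from_pretrained("t5-base")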
| 354 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class lowercase ( UpperCamelCase__ ):
_a = "audio-spectrogram-transformer"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=16 , _a=True , _a=10 , _a=10 , _a=1024 , _a=128 , **_a , ) -> List[Any]:
super().__init__(**_a )
_A : Any = hidden_size
_A : Tuple = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Any = intermediate_size
_A : Optional[Any] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : Optional[Any] = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : str = patch_size
_A : Tuple = qkv_bias
_A : Dict = frequency_stride
_A : Union[str, Any] = time_stride
_A : Any = max_length
_A : Tuple = num_mel_bins
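# Minimal construction sketch (assumption: upstream this class is ASTConfig;
# the values below are simply the defaults from the signature above):
#
#     config = ASTConfig(num_mel_bins=128, max_length=1024, frequency_stride=10)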
| 343 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCAmelCase_ ( snake_case_ ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E00 and cp <= 0X9_FFF)
or (cp >= 0X3_400 and cp <= 0X4_DBF) #
or (cp >= 0X20_000 and cp <= 0X2A_6DF) #
or (cp >= 0X2A_700 and cp <= 0X2B_73F) #
or (cp >= 0X2B_740 and cp <= 0X2B_81F) #
or (cp >= 0X2B_820 and cp <= 0X2C_EAF) #
or (cp >= 0XF_900 and cp <= 0XF_AFF)
or (cp >= 0X2F_800 and cp <= 0X2F_A1F) #
): #
return True
return False
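# Illustrative behaviour of the code-point check above ("中" is U+4E2D, inside
# the base CJK block 0x4E00-0x9FFF, while "a" is not):
#
#     _is_chinese_char(ord("中"))  # -> True
#     _is_chinese_char(ord("a"))   # -> False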
def lowerCAmelCase_ ( snake_case_ ):
# word like '180' or '身高' or '神'
for char in word:
_A : List[str] = ord(snake_case_ )
if not _is_chinese_char(snake_case_ ):
return 0
return 1
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = set()
for token in tokens:
_A : int = len(snake_case_ ) > 1 and is_chinese(snake_case_ )
if chinese_word:
word_set.add(snake_case_ )
_A : int = list(snake_case_ )
return word_list
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if not chinese_word_set:
return bert_tokens
_A : Tuple = max([len(snake_case_ ) for w in chinese_word_set] )
_A : int = bert_tokens
    _A , _A : List[str] = 0, len(snake_case_ )
while start < end:
_A : int = True
if is_chinese(bert_word[start] ):
_A : List[Any] = min(end - start,snake_case_ )
for i in range(snake_case_,1,-1 ):
_A : Any = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1,start + i ):
_A : List[Any] = """##""" + bert_word[j]
_A : Any = start + i
_A : int = False
break
if single_word:
start += 1
return bert_word
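# Worked example for the whole-word-masking helper above (assuming the LTP
# segmentation produced the word "中国"): subword pieces after the first
# character of a matched word receive the "##" continuation prefix:
#
#     add_sub_symbol(["中", "国", "人"], {"中国"})  # -> ["中", "##国", "人"]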
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Optional[Any] = []
for i in range(0,len(snake_case_ ),100 ):
_A : Union[str, Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_A : Optional[int] = [get_chinese_word(snake_case_ ) for r in res]
ltp_res.extend(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
_A : Tuple = []
for i in range(0,len(snake_case_ ),100 ):
_A : Optional[Any] = bert_tokenizer(lines[i : i + 100],add_special_tokens=snake_case_,truncation=snake_case_,max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(snake_case_ ) == len(snake_case_ )
_A : Optional[int] = []
for input_ids, chinese_word in zip(snake_case_,snake_case_ ):
_A : int = []
for id in input_ids:
_A : Any = bert_tokenizer._convert_id_to_token(snake_case_ )
input_tokens.append(snake_case_ )
_A : Optional[Any] = add_sub_symbol(snake_case_,snake_case_ )
_A : List[Any] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(snake_case_ ):
if token[:2] == "##":
_A : Dict = token[2:]
# save chinese tokens' pos
if len(snake_case_ ) == 1 and _is_chinese_char(ord(snake_case_ ) ):
ref_id.append(snake_case_ )
ref_ids.append(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
return ref_ids
def lowerCAmelCase_ ( snake_case_ ):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name,"""r""",encoding="""utf-8""" ) as f:
_A : str = f.readlines()
_A : Optional[int] = [line.strip() for line in data if len(snake_case_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_A : Union[str, Any] = LTP(args.ltp ) # faster in GPU device
_A : Optional[Any] = BertTokenizer.from_pretrained(args.bert )
_A : Tuple = prepare_ref(snake_case_,snake_case_,snake_case_ )
with open(args.save_path,"""w""",encoding="""utf-8""" ) as f:
_A : Tuple = [json.dumps(snake_case_ ) + """\n""" for ref in ref_ids]
f.writelines(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
_snake_case = parser.parse_args()
main(args)
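# Example invocation, using the argparse defaults declared above (the script
# file name is taken from the parser description and may differ):
#
#     python prepare_chinese_ref.py \
#         --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp \
#         --bert ./resources/robert \
#         --save_path ./resources/ref.txt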
| 355 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def lowerCAmelCase_ ( ):
_A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
_A : Optional[Any] = parser.parse_args()
return args.f
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> None:
_A : List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(_a )
def a__ ( self , _a ) -> Dict:
_A : Tuple = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_a , """argv""" , _a ):
_A : Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_a , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ) -> Optional[int]:
_A : Tuple = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_a )
_A : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
_A : List[str] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
| 343 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
debug_launcher(test_script.main )
def a__ ( self ) -> Any:
debug_launcher(test_ops.main )
| 356 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ) -> Union[str, Any]:
_A : Optional[int] = parent
_A : Dict = batch_size
_A : Any = image_size
_A : Optional[int] = patch_size
_A : Optional[int] = num_channels
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Any = hidden_size
_A : Any = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : int = intermediate_size
_A : Dict = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Any = type_sequence_label_size
_A : str = initializer_range
_A : Tuple = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : List[Any] = (image_size // patch_size) ** 2
_A : str = num_patches + 1
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : List[str] = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Union[str, Any] = self.type_sequence_label_size
_A : Tuple = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , labels=_a )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : Dict = 1
_A : str = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> Any:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A : Dict = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Tuple:
_A : Tuple = ViTMSNModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Any:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Tuple = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def a__ ( self ) -> str:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> List[Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Any:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> int:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[int]:
torch.manual_seed(2 )
_A : Tuple = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a )
_A : Tuple = self.default_image_processor
_A : Dict = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : int = model(**_a )
# verify the logits
_A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Optional[int] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 343 | 0 |
from __future__ import annotations
from collections.abc import Callable
_snake_case = list[list[float | int]]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )]
_A : int
_A : int
_A : int
_A : int
_A : int
_A : float
for row in range(snake_case_ ):
for col in range(snake_case_ ):
_A : Dict = matrix[row][col]
_A : List[Any] = vector[row][0]
_A : List[Any] = 0
_A : Optional[Any] = 0
while row < size and col < size:
# pivoting
_A : Any = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_,snake_case_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
            _A , _A : Optional[Any] = augmented[pivot_row], augmented[row]  # swap the pivot row into place
for rowa in range(row + 1,snake_case_ ):
_A : str = augmented[rowa][col] / augmented[row][col]
_A : List[Any] = 0
for cola in range(col + 1,size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1,snake_case_ ):
for row in range(snake_case_ ):
_A : int = augmented[row][col] / augmented[col][col]
for cola in range(snake_case_,size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row],10 )] for row in range(snake_case_ )
]
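# Quick check of the Gaussian-elimination solver above on the system
#     x + 2y = 5
#     3x + 4y = 6
# whose exact solution is x = -4, y = 4.5:
#
#     solve([[1, 2], [3, 4]], [[5], [6]])  # -> [[-4.0], [4.5]]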
def lowerCAmelCase_ ( snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
_A : Matrix = [[0] for _ in range(snake_case_ )]
_A : Matrix
_A : int
_A : int
_A : int
for x_val, y_val in enumerate(snake_case_ ):
for col in range(snake_case_ ):
_A : str = (x_val + 1) ** (size - col - 1)
_A : List[str] = y_val
_A : Any = solve(snake_case_,snake_case_ )
def interpolated_func(snake_case_ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case_ ) )
return interpolated_func
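# Sanity check from the Project Euler 101 statement: fitting a quadratic to
# the first three cubes (1, 8, 27) predicts 58 as the fourth term, the first
# incorrect term (FIT) of that optimum polynomial:
#
#     interpolate([1, 8, 27])(4)  # -> 58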
def lowerCAmelCase_ ( snake_case_ ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCAmelCase_ ( snake_case_ = question_function,snake_case_ = 10 ):
_A : list[int] = [func(snake_case_ ) for x_val in range(1,order + 1 )]
_A : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1,order + 1 )
]
_A : int = 0
_A : Callable[[int], int]
_A : int
for poly in polynomials:
_A : Optional[int] = 1
while func(snake_case_ ) == poly(snake_case_ ):
x_val += 1
ret += poly(snake_case_ )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 357 |
def lowerCAmelCase_ ( snake_case_ = 1000 ):
_A : List[Any] = 3
_A : Tuple = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:
            # multiples of 15 are already multiples of 3, so they are counted exactly once here
            result += a
        a += 1
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Any = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_A : Union[str, Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : int = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[str] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : int = None
if token is not None:
_A : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    _A : str = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
_A : Optional[Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_A : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[Any] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Dict = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Tuple = requests.get(snake_case_,headers=snake_case_,allow_redirects=snake_case_ )
_A : Tuple = result.headers["""Location"""]
_A : Union[str, Any] = requests.get(snake_case_,allow_redirects=snake_case_ )
_A : Dict = os.path.join(snake_case_,f'''{artifact_name}.zip''' )
with open(snake_case_,"""wb""" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : List[str] = []
_A : int = []
_A : Tuple = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
_A : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_A : Dict = line[: line.index(""": """ )]
_A : Dict = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_A : List[str] = line[len("""FAILED """ ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
_A : Optional[int] = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` '''
f'''and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
_A : Any = None
if job_name and job_links:
_A : Dict = job_links.get(snake_case_,snake_case_ )
# A list with elements of the form (line of error, error, failed test)
_A : Optional[int] = [x + [y] + [job_link] for x, y in zip(snake_case_,snake_case_ )]
return result
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = []
_A : Optional[int] = [os.path.join(snake_case_,snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_,job_links=snake_case_ ) )
return errors
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = Counter()
counter.update([x[1] for x in logs] )
_A : Tuple = counter.most_common()
_A : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_A : str = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_A : Dict = test.split("""/""" )[2]
else:
_A : str = None
return test
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : str = [(x[0], x[1], get_model(x[2] )) for x in logs]
_A : Union[str, Any] = [x for x in logs if x[2] is not None]
_A : Optional[Any] = {x[2] for x in logs}
_A : List[Any] = {}
for test in tests:
_A : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_A : Union[str, Any] = counter.most_common()
_A : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_A : str = sum(error_counts.values() )
if n_errors > 0:
_A : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda snake_case_ : item[1]["count"],reverse=snake_case_ ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = """| no. | error | status |"""
_A : List[Any] = """|-:|:-|:-|"""
_A : List[Any] = [header, sep]
for error in reduced_by_error:
_A : List[str] = reduced_by_error[error]["""count"""]
_A : List[Any] = f'''| {count} | {error[:100]} | |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = """| model | no. of errors | major error | count |"""
_A : Optional[Any] = """|-:|-:|-:|-:|"""
_A : Union[str, Any] = [header, sep]
for model in reduced_by_model:
_A : Dict = reduced_by_model[model]["""count"""]
_A : str = list(reduced_by_model[model]["""errors"""].items() )[0]
_A : Union[str, Any] = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_snake_case = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_snake_case = get_job_links(args.workflow_run_id, token=args.token)
_snake_case = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_snake_case = k.find(" / ")
_snake_case = k[index + len(" / ") :]
_snake_case = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_snake_case = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_snake_case = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_snake_case = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_snake_case = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_snake_case = reduce_by_error(errors)
_snake_case = reduce_by_model(errors)
_snake_case = make_github_table(reduced_by_error)
_snake_case = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 358 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ) -> List[Any]:
_A : Tuple = parent
_A : Any = batch_size
_A : int = image_size
_A : Tuple = num_channels
_A : List[Any] = num_stages
_A : Any = hidden_sizes
_A : Union[str, Any] = depths
_A : Union[str, Any] = is_training
_A : Tuple = use_labels
_A : Optional[Any] = intermediate_size
_A : Union[str, Any] = hidden_act
_A : Any = num_labels
_A : List[str] = initializer_range
_A : str = out_features
_A : int = out_indices
_A : List[Any] = scope
def a__ ( self ) -> str:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : str = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.num_labels )
_A : str = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> int:
_A : int = ConvNextModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> List[Any]:
_A : Union[str, Any] = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
_A : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> str:
_A : List[str] = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Optional[Any] = None
_A : str = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> int:
_A : int = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : int = ConvNextModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Union[str, Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> int:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Optional[int]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : List[str] = self.default_image_processor
_A : int = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Dict = model(**_a )
# verify the logits
_A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Any = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> List[str]:
_A : Optional[int] = ConvNextModelTester(self )
| 343 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# Initialise PyTorch model
_A : List[Any] = MobileBertConfig.from_json_file(snake_case_ )
print(f'''Building PyTorch model from configuration: {config}''' )
_A : Tuple = MobileBertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
_A : Tuple = load_tf_weights_in_mobilebert(snake_case_,snake_case_,snake_case_ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(),snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
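# Example invocation (file names are illustrative):
#
#     python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#         --mobilebert_config_file ./mobilebert/config.json \
#         --pytorch_dump_path ./mobilebert/pytorch_model.bin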
| 359 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 | 0 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_snake_case = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowercase :
def __init__( self , _a , _a=16 , _a=13 , _a=7 , _a=14 , _a=10 , _a=19 , _a=5 , _a=4 , _a=True , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=[1, 2, 3, 4, 5] , _a=25 , _a=5 , ) -> List[str]:
_A : Any = d_model
_A : List[str] = parent
_A : Optional[Any] = batch_size
_A : Optional[Any] = prediction_length
_A : List[Any] = context_length
_A : Tuple = cardinality
_A : Union[str, Any] = num_time_features
_A : Union[str, Any] = lags_sequence
_A : List[str] = embedding_dimension
_A : List[str] = is_training
_A : Any = hidden_size
_A : int = num_hidden_layers
_A : Tuple = num_attention_heads
_A : Optional[int] = intermediate_size
_A : Any = hidden_act
_A : Union[str, Any] = hidden_dropout_prob
_A : List[str] = attention_probs_dropout_prob
_A : List[str] = context_length
_A : Optional[Any] = prediction_length + label_length
_A : Optional[Any] = label_length
_A : str = moving_average
_A : str = autocorrelation_factor
def a__ ( self ) -> Tuple:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def a__ ( self , _a ) -> str:
_A : int = config.context_length + max(config.lags_sequence )
_A : Dict = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_A : int = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_A : Any = floats_tensor([self.batch_size, _past_length] )
_A : Tuple = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_A : int = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_A : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] )
_A : Any = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def a__ ( self ) -> List[str]:
_A : Any = self.get_config()
_A : Any = self.prepare_autoformer_inputs_dict(_a )
return config, inputs_dict
def a__ ( self ) -> Tuple:
_A : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self , _a , _a ) -> Optional[int]:
_A : Union[str, Any] = AutoformerModel(config=_a ).to(_a ).eval()
_A : List[Any] = model(**_a )
_A : Any = outputs.encoder_last_hidden_state
_A : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = model.get_encoder()
encoder.save_pretrained(_a )
_A : Union[str, Any] = AutoformerEncoder.from_pretrained(_a ).to(_a )
_A : str = model.create_network_inputs(**_a )
_A : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_A : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_A : int = encoder(inputs_embeds=_a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_A : Union[str, Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_A : Any = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_A : int = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_A : str = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : int = model.get_decoder()
decoder.save_pretrained(_a )
_A : str = AutoformerDecoder.from_pretrained(_a ).to(_a )
_A : List[Any] = decoder(
trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_a = (AutoformerForPrediction,) if is_torch_available() else ()
_a = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> int:
_A : Tuple = AutoformerModelTester(self )
_A : Union[str, Any] = ConfigTester(self , config_class=_a , has_text_modality=_a )
def a__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
def a__ ( self ) -> str:
_A : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_A : Any = model_class(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
_A : Any = model_class.from_pretrained(_a , output_loading_info=_a )
self.assertEqual(info["""missing_keys"""] , [] )
def a__ ( self ) -> str:
_A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def a__ ( self ) -> str:
pass
def a__ ( self ) -> Optional[Any]:
_A : Tuple = inspect.signature(getattr(_a , """forward""" ) )
# The main input is the name of the argument after `self`
_A : int = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _a )
def a__ ( self ) -> int:
_A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[int] = model_class(_a )
_A : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[str] = [*signature.parameters.keys()]
_A : str = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(_a )] , _a )
def a__ ( self ) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Tuple = True
_A : Optional[Any] = getattr(self.model_tester , """seq_length""" , _a )
_A : Optional[Any] = getattr(self.model_tester , """decoder_seq_length""" , _a )
_A : Union[str, Any] = getattr(self.model_tester , """encoder_seq_length""" , _a )
_A : List[Any] = getattr(self.model_tester , """d_model""" , _a )
_A : Any = getattr(self.model_tester , """num_attention_heads""" , _a )
_A : List[str] = d_model // num_attention_heads
for model_class in self.all_model_classes:
_A : Optional[Any] = True
_A : Dict = False
_A : List[Any] = True
_A : int = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Optional[Any] = model(**self._prepare_for_class(_a , _a ) )
_A : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_A : Any = True
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : List[Any] = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[int] = outputs.encoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_A : Optional[int] = len(_a )
_A : Union[str, Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_a , _a )
# decoder attentions
_A : Tuple = outputs.decoder_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_A : str = outputs.cross_attentions
self.assertIsInstance(_a , (list, tuple) )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_A : Optional[Any] = True
_A : Dict = True
_A : List[Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Tuple = model(**self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + 2 , len(_a ) )
_A : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def a__ ( self ) -> List[Any]:
super().test_retain_grad_hidden_states_attentions()
def lowerCAmelCase_ ( snake_case_="train-batch.pt" ):
_A : Union[str, Any] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""",filename=snake_case_,repo_type="""dataset""" )
_A : Optional[int] = torch.load(snake_case_,map_location=snake_case_ )
return batch
@require_torch
@slow
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Dict:
_A : str = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a )
_A : Any = prepare_batch()
with torch.no_grad():
_A : str = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
_A : Union[str, Any] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , _a )
_A : str = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def a__ ( self ) -> Any:
_A : Optional[int] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a )
_A : int = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_A : List[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
_A : Dict = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , _a )
_A : Dict = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_a )
self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) )
def a__ ( self ) -> List[str]:
_A : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a )
_A : Any = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_A : Optional[Any] = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
_A : Union[str, Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , _a )
_A : str = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_a )
_A : Optional[Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _a , rtol=1e-1 ) )
| 360 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowercase :
_a = 42
# setable values
_a = 42
_a = 42
_a = None
@classmethod
def a__ ( cls , _a , _a , _a ) -> Tuple:
return cls(common=_a , init_noise_sigma=_a , timesteps=_a )
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in FlaxKarrasDiffusionSchedulers]
_a = 42
@property
def a__ ( self ) -> Dict:
return True
@register_to_config
def __init__( self , _a = 1000 , _a = 0.0001 , _a = 0.02 , _a = "linear" , _a = None , _a = "fixed_small" , _a = True , _a = "epsilon" , _a = jnp.floataa , ) -> Tuple:
_A : Tuple = dtype
def a__ ( self , _a = None ) -> DDPMSchedulerState:
if common is None:
_A : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_A : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
_A : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_a , init_noise_sigma=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a = None ) -> jnp.ndarray:
return sample
def a__ ( self , _a , _a , _a = () ) -> DDPMSchedulerState:
_A : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_A : Dict = (jnp.arange(0 , _a ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_a , timesteps=_a , )
def a__ ( self , _a , _a , _a=None , _a=None ) -> Optional[int]:
_A : Optional[Any] = state.common.alphas_cumprod[t]
_A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_A : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_A : Optional[Any] = jnp.clip(_a , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_A : Any = jnp.log(jnp.clip(_a , a_min=1e-20 ) )
elif variance_type == "fixed_large":
_A : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_A : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_A : str = variance
_A : Union[str, Any] = state.common.betas[t]
_A : Tuple = (predicted_variance + 1) / 2
_A : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self , _a , _a , _a , _a , _a = None , _a = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_A : Dict = timestep
if key is None:
_A : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_A , _A : List[str] = jnp.split(_a , sample.shape[1] , axis=1 )
else:
_A : int = None
# 1. compute alphas, betas
_A : int = state.common.alphas_cumprod[t]
_A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_A : Union[str, Any] = 1 - alpha_prod_t
_A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or'''
                """ `v_prediction` for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A : Union[str, Any] = jnp.clip(_a , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_A : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_A : Tuple = jax.random.split(_a , num=1 )
_A : Dict = jax.random.normal(_a , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_a , _a , predicted_variance=_a ) ** 0.5) * noise
_A : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_A : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_a , state=_a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return add_noise_common(state.common , _a , _a , _a )
def a__ ( self , _a , _a , _a , _a , ) -> jnp.ndarray:
return get_velocity_common(state.common , _a , _a , _a )
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
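# --- Illustrative sketch (not part of the original module) ---
# The "fixed_small" variance branch above implements the DDPM posterior
# variance beta_hat_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t
# (formula (7) of https://arxiv.org/pdf/2006.11239.pdf). A minimal NumPy
# check of that quantity, assuming a linear beta schedule; all names here
# are illustrative and not part of the scheduler API.
import numpy as np

_betas = np.linspace(0.0001, 0.02, 1000)
_alphas_cumprod = np.cumprod(1.0 - _betas)

def _posterior_variance(t: int) -> float:
    alpha_prod_t = _alphas_cumprod[t]
    alpha_prod_t_prev = _alphas_cumprod[t - 1] if t > 0 else 1.0
    return (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * _betas[t]

# The posterior variance is positive but strictly smaller than beta_t itself.
assert 0.0 < _posterior_variance(1) < _betas[1]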
| 343 | 0 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 361 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print( name,val,spaces=0 ):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = """.""" * max(0,spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val,dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k,val[k],spaces + 2 )
    elif isinstance(val,torch.Tensor ):
        print(msg,""":""",val.size() )
    else:
        print(msg,""":""",val )
def fix_query_key_value_ordering( param,checkpoint_version,num_splits,num_heads,hidden_size ):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0,2 )
        param = param.transpose(1,2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0,1 ).contiguous()
    param = param.view(*input_shape )
    return param
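# --- Illustrative sketch (not part of the original script) ---
# A toy shape check for fix_query_key_value_ordering above, exercising the
# checkpoint_version >= 2.0 branch: the fused QKV tensor is viewed as
# [num_heads, num_splits, head_dim, ...], the first two axes are swapped,
# and it is flattened back to its original shape. The sizes below are
# arbitrary; call _demo_fix_qkv_ordering() to verify.
def _demo_fix_qkv_ordering():
    num_heads, head_dim, num_splits, hidden = 2, 4, 3, 8
    param = torch.randn(num_heads * num_splits * head_dim, hidden)
    out = fix_query_key_value_ordering(param, 2.0, num_splits, num_heads, head_dim)
    # The permutation reorders rows but preserves the overall shape.
    assert out.shape == param.shape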
def convert_megatron_checkpoint( args,input_state_dict,config ):
# The converted output model.
_A : Any = {}
# old versions did not store training args
_A : str = input_state_dict.get("""args""",snake_case_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_A : Union[str, Any] = ds_args.padded_vocab_size
_A : List[Any] = ds_args.max_position_embeddings
_A : Optional[int] = ds_args.hidden_size
_A : List[Any] = ds_args.num_layers
_A : List[str] = ds_args.num_attention_heads
_A : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_A : Union[str, Any] = config.n_head
# The hidden_size per head.
_A : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_A : Tuple = input_state_dict["""checkpoint_version"""]
else:
_A : Any = 0.0
# The model.
_A : Any = input_state_dict["""model"""]
# The language model.
_A : Tuple = model["""language_model"""]
# The embeddings.
_A : Any = lm["""embedding"""]
# The word embeddings.
_A : Dict = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
_A : Tuple = word_embeddings
# The position embeddings.
_A : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_A : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_A : Optional[int] = pos_embeddings
# The transformer.
_A : Any = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_A : Optional[int] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_A : Union[str, Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_A : List[str] = layer_re.match(snake_case_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_A : Tuple = int(m.group(1 ) )
# The name of the operation.
_A : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
_A : Dict = m.group(3 )
# The name of the layer.
_A : Optional[Any] = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_A : Union[str, Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_A : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_A : List[str] = torch.tril(torch.ones((n_positions, n_positions),dtype=torch.floataa ) ).view(
1,1,snake_case_,snake_case_ )
_A : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
_A : List[str] = torch.tensor(-1e4,dtype=torch.floataa )
_A : Tuple = masked_bias
_A : Tuple = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_A : Tuple = out_val.transpose(0,1 ).contiguous()
# Store.
_A : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_A : List[str] = fix_query_key_value_ordering(snake_case_,snake_case_,3,snake_case_,snake_case_ )
# Store. No change of shape.
_A : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_A : List[str] = megatron_to_transformers[op_name]
_A : Any = val.transpose(0,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_A : Dict = megatron_to_transformers[op_name]
_A : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_A : Optional[Any] = transformer["""final_layernorm.weight"""]
_A : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the matrix tied to the word embeddings.
_A : List[str] = word_embeddings
# It should be done!
return output_state_dict
def main( ):
# Create the argument parser.
_A : Any = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""",action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""",type=snake_case_,help="""Path to the checkpoint file (.zip archive or direct .pt file)""",)
parser.add_argument(
"""--config_file""",default="""""",type=snake_case_,help="""An optional config json file describing the pre-trained model.""",)
_A : Optional[int] = parser.parse_args()
# Extract the basename.
_A : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
else:
_A : Tuple = torch.load(args.path_to_checkpoint,map_location="""cpu""" )
_A : Optional[Any] = input_state_dict.get("""args""",snake_case_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_A : Union[str, Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_A : int = """gelu_new"""
else:
_A : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_A : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
_A : Any = GPTaConfig(
vocab_size=50257,n_positions=1024,n_embd=1024,n_layer=24,n_head=16,n_inner=4096,activation_function=snake_case_,resid_pdrop=0.1,embd_pdrop=0.1,attn_pdrop=0.1,layer_norm_epsilon=1e-5,initializer_range=0.02,summary_type="""cls_index""",summary_use_proj=snake_case_,summary_activation=snake_case_,summary_proj_to_labels=snake_case_,summary_first_dropout=0.1,scale_attn_weights=snake_case_,use_cache=snake_case_,bos_token_id=50256,eos_token_id=50256,)
else:
_A : Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
_A : List[str] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_A : Optional[Any] = convert_megatron_checkpoint(snake_case_,snake_case_,snake_case_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(snake_case_,snake_case_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_A : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_A : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_A : List[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_A : Optional[Any] = """gpt2"""
_A : List[str] = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = type(snake_case_ ).__name__
_A : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(snake_case_ )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(snake_case_ )
# Store the state_dict to file.
_A : Union[str, Any] = os.path.join(snake_case_,"""pytorch_model.bin""" )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(snake_case_,snake_case_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 343 | 0 |
def lowerCAmelCase_ ( stringa,stringb ):
    if len(stringa ) != len(stringb ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for chara, charb in zip(stringa,stringb ):
        if chara != charb:
count += 1
return count
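# --- Illustrative usage (not part of the original file) ---
# The function above is the Hamming distance: the number of positions at
# which two equal-length strings differ.
assert lowerCAmelCase_("karolin", "kathrin") == 3
assert lowerCAmelCase_("abc", "abc") == 0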
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(UpperCamelCase__ )
class lowercase :
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
_A : Optional[Any] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
_A : Dict = titles if not isinstance(_a , _a ) else [titles]
_A : Tuple = texts if not isinstance(_a , _a ) else [texts]
_A : Any = len(_a )
_A : Optional[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
                F'''There should be as many titles as texts, but got {len(_a )} titles and {len(_a )} texts.''' )
_A : str = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
_A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A : str = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def a__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> List[DPRSpanPrediction]:
_A : Dict = reader_input["""input_ids"""]
_A , _A , _A : Tuple = reader_output[:3]
_A : List[str] = len(_a )
_A : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
_A : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_A : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A : Tuple = sequence_ids.index(self.pad_token_id )
else:
_A : Tuple = len(_a )
_A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self , _a , _a , _a , _a , ) -> List[DPRSpanPrediction]:
_A : Tuple = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _A : Tuple = sorted(_a , key=lambda x : x[1] , reverse=_a )
_A : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
_A : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = READER_PRETRAINED_VOCAB_FILES_MAP
_a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = READER_PRETRAINED_INIT_CONFIGURATION
_a = ["input_ids", "attention_mask"]
| 343 | 0 |
def largest_square_area_in_matrix_top_down_approach( rows,cols,mat ):
    def update_area_of_max_square( row,col ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row,col + 1 )
        diagonal = update_area_of_max_square(row + 1,col + 1 )
        down = update_area_of_max_square(row + 1,col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0],sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0,0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp( rows,cols,mat ):
    def update_area_of_max_square_using_dp_array(
        row,col,dp_array ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row,col + 1,dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1,col + 1,dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1,col,dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0],sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0,0,dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up( rows,cols,mat ):
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1,-1,-1 ):
        for col in range(cols - 1,-1,-1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right,diagonal,bottom )
                largest_square_area = max(dp_array[row][col],largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization( rows,cols,mat ):
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1,-1,-1 ):
        for col in range(cols - 1,-1,-1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right,diagonal,bottom )
                largest_square_area = max(current_row[col],largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
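# --- Illustrative usage (not part of the original file) ---
# All four variants return the side length of the largest all-ones square:
assert largest_square_area_in_matrix_bottom_up(1, 2, [[1, 0]]) == 1
assert largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]]) == 2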
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 363 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
@property
def a__ ( self ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a__ ( self ) -> List[Any]:
_A : int = ort.SessionOptions()
_A : Any = False
return options
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_A : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_A : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_A : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = """A red cat sitting on a park bench"""
_A : Optional[Any] = np.random.RandomState(0 )
_A : Dict = pipe(
prompt=_a , image=_a , mask_image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_a , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 343 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = "swin"
_a = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _a=224 , _a=4 , _a=3 , _a=96 , _a=[2, 2, 6, 2] , _a=[3, 6, 12, 24] , _a=7 , _a=4.0 , _a=True , _a=0.0 , _a=0.0 , _a=0.1 , _a="gelu" , _a=False , _a=0.02 , _a=1e-5 , _a=32 , _a=None , _a=None , **_a , ) -> Optional[Any]:
super().__init__(**_a )
_A : Union[str, Any] = image_size
_A : str = patch_size
_A : Any = num_channels
_A : str = embed_dim
_A : Optional[int] = depths
_A : Optional[int] = len(_a )
_A : Any = num_heads
_A : Union[str, Any] = window_size
_A : Optional[int] = mlp_ratio
_A : Any = qkv_bias
_A : Any = hidden_dropout_prob
_A : int = attention_probs_dropout_prob
_A : Optional[int] = drop_path_rate
_A : List[str] = hidden_act
_A : Any = use_absolute_embeddings
_A : Dict = layer_norm_eps
_A : List[Any] = initializer_range
_A : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_A : Optional[Any] = int(embed_dim * 2 ** (len(_a ) - 1) )
_A : Optional[int] = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
_A : List[str] = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class lowercase ( UpperCamelCase__ ):
_a = version.parse("1.11" )
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a__ ( self ) -> float:
return 1e-4
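# --- Illustrative sketch (not part of the original module) ---
# The config above derives the final channel dimension as
# embed_dim * 2 ** (num_stages - 1), since each Swin stage doubles the
# channels. For the tiny variant (embed_dim=96, four stages):
_embed_dim, _depths = 96, [2, 2, 6, 2]
assert int(_embed_dim * 2 ** (len(_depths) - 1)) == 768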
| 364 |
from __future__ import annotations
def generate_all_permutations( sequence ):
    create_state_space_tree(sequence,[],0,[0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence,current_sequence,index,index_used,):
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence,current_sequence,index + 1,index_used )
            current_sequence.pop()
            index_used[i] = False
_snake_case = [3, 1, 2, 4]
generate_all_permutations(_snake_case)
_snake_case = ["A", "B", "C"]
generate_all_permutations(_snake_case)
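# --- Illustrative sketch (not part of the original file) ---
# The same backtracking walk as above, but collecting the permutations
# instead of printing them (names here are illustrative only):
def collect_all_permutations(sequence):
    result = []

    def backtrack(current, used):
        if len(current) == len(sequence):
            result.append(current.copy())
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))
    return result

# A sequence of length 4 has 4! = 24 permutations.
assert len(collect_all_permutations([3, 1, 2, 4])) == 24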
| 343 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__ ):
def __init__( self , *_a , **_a ) -> None:
        warnings.warn(
            """The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DPTImageProcessor instead.""" , FutureWarning , )
super().__init__(*_a , **_a )
| 365 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = filter(lambda snake_case_ : p.requires_grad,model.parameters() )
_A : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if metric == "rouge2":
_A : Optional[int] = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
_A : Dict = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
_A : List[str] = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
""" function.""" )
_A : Optional[int] = ModelCheckpoint(
dirpath=snake_case_,filename=snake_case_,monitor=f'''val_{metric}''',mode="""max""",save_top_k=3,every_n_epochs=1,)
return checkpoint_callback
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
return EarlyStopping(
monitor=f'''val_{metric}''',mode="""min""" if """loss""" in metric else """max""",patience=snake_case_,verbose=snake_case_,)
class lowercase ( pl.Callback ):
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[Any] = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_a )
@rank_zero_only
def a__ ( self , _a , _a , _a , _a=True ) -> None:
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
_A : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
_A : Dict = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A : List[Any] = od / """test_results.txt"""
_A : List[Any] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A : Optional[int] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
_A : int = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_a )
generations_file.parent.mkdir(exist_ok=_a )
with open(_a , """a+""" ) as writer:
for key in sorted(_a ):
if key in ["log", "progress_bar", "preds"]:
continue
_A : List[Any] = metrics[key]
if isinstance(_a , torch.Tensor ):
_A : str = val.item()
_A : str = F'''{key}: {val:.6f}\n'''
writer.write(_a )
if not save_generations:
return
if "preds" in metrics:
_A : List[Any] = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_a )
@rank_zero_only
def a__ ( self , _a , _a ) -> str:
try:
_A : int = pl_module.model.model.num_parameters()
except AttributeError:
_A : str = pl_module.model.num_parameters()
_A : Optional[int] = count_trainable_parameters(_a )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
def a__ ( self , _a , _a ) -> Optional[int]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_a , _a , """test""" )
@rank_zero_only
def a__ ( self , _a , _a ) -> Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 343 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase :
def __init__( self , _a = "cpu" , _a = "openai/clip-vit-large-patch14" ) -> None:
_A : Optional[Any] = device
_A : str = CLIPTokenizerFast.from_pretrained(_a )
_A : Dict = [0.48145466, 0.4578275, 0.40821073]
_A : Optional[Any] = [0.26862954, 0.26130258, 0.27577711]
_A : Any = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_A : List[Any] = torchvision.transforms.Resize(224 )
_A : str = torchvision.transforms.CenterCrop(224 )
def a__ ( self , _a ) -> List[Any]:
_A : List[str] = self.resize(_a )
_A : Union[str, Any] = self.center_crop(_a )
_A : str = self.normalize(_a )
return images
def __call__( self , _a=None , _a=None , **_a ) -> Dict:
_A : List[str] = self.tokenizer(text=_a , **_a )
_A : List[str] = self.preprocess_img(_a )
_A : List[Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase ( nn.Module ):
def __init__( self , _a=10 , _a=0.01 , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=False , _a=True , _a="image" , _a=True , _a=False , _a=False , _a=False , ) -> None:
super().__init__()
_A : List[Any] = None
_A : str = device if device else get_device()
if vqgan:
_A : List[Any] = vqgan
else:
_A : List[str] = load_vqgan(self.device , conf_path=_a , ckpt_path=_a )
self.vqgan.eval()
if clip:
_A : Optional[int] = clip
else:
_A : int = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_A : int = ProcessorGradientFlow(device=self.device )
_A : int = iterations
_A : Dict = lr
_A : Any = log
_A : Tuple = make_grid
_A : Union[str, Any] = return_val
_A : str = quantize
_A : Optional[Any] = self.vqgan.decoder.z_shape
def a__ ( self , _a=None , _a=None , _a=5 , _a=True ) -> Union[str, Any]:
_A : Any = []
if output_path is None:
_A : Tuple = """./animation.gif"""
if input_path is None:
_A : List[Any] = self.save_path
_A : Union[str, Any] = sorted(glob(input_path + """/*""" ) )
if not len(_a ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(_a ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_A : Any = total_duration / len(_a )
_A : Optional[int] = [frame_duration] * len(_a )
if extend_frames:
_A : Union[str, Any] = 1.5
_A : Dict = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(_a ) )
imageio.mimsave(_a , _a , duration=_a )
print(F'''gif saved to {output_path}''' )
def a__ ( self , _a=None , _a=None ) -> Optional[int]:
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_A : str = preprocess(Image.open(_a ) , target_image_size=256 ).to(self.device )
_A : Any = preprocess_vqgan(_a )
_A : Union[str, Any] = self.vqgan.encode(_a )
return z
def a__ ( self , _a ) -> Optional[Any]:
_A : int = self.latent.detach().requires_grad_()
_A : str = base_latent + transform_vector
if self.quantize:
_A : Dict = self.vqgan.quantize(_a )
else:
_A : int = trans_latent
return self.vqgan.decode(_a )
def a__ ( self , _a , _a , _a=None ) -> List[str]:
_A : List[Any] = self.clip_preprocessor(text=_a , images=_a , return_tensors="""pt""" , padding=_a )
_A : Tuple = self.clip(**_a )
_A : int = clip_outputs.logits_per_image
if weights is not None:
_A : Tuple = similarity_logits * weights
return similarity_logits.sum()
def a__ ( self , _a , _a , _a ) -> str:
_A : Optional[int] = self._get_clip_similarity(pos_prompts["""prompts"""] , _a , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_A : List[Any] = self._get_clip_similarity(neg_prompts["""prompts"""] , _a , weights=neg_prompts["""weights"""] )
else:
_A : List[str] = torch.tensor([1] , device=self.device )
_A : str = -torch.log(_a ) + torch.log(_a )
return loss
def a__ ( self , _a , _a , _a ) -> Dict:
_A : int = torch.randn_like(self.latent , requires_grad=_a , device=self.device )
_A : List[Any] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_A : Tuple = self._add_vector(_a )
_A : int = loop_post_process(_a )
_A : Dict = self._get_CLIP_loss(_a , _a , _a )
print("""CLIP loss""" , _a )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=_a )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def a__ ( self , _a , _a , _a ) -> int:
wandb.init(reinit=_a , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_A : Optional[int] = Image.open(_a )
_A : Tuple = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(_a ) )
def a__ ( self , _a ) -> Union[str, Any]:
if not prompts:
return []
_A : List[str] = []
_A : Optional[int] = []
if isinstance(_a , _a ):
_A : Optional[Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(_a , (tuple, list) ):
_A : List[str] = prompt[0]
_A : int = float(prompt[1] )
elif ":" in prompt:
_A : Optional[int] = prompt.split(""":""" )
_A : Any = float(_a )
else:
_A : Any = prompt
_A : Tuple = 1.0
processed_prompts.append(_a )
weights.append(_a )
return {
"prompts": processed_prompts,
"weights": torch.tensor(_a , device=self.device ),
}
def a__ ( self , _a , _a=None , _a=None , _a=True , _a=False , _a=True , _a=True , _a=None , ) -> List[Any]:
if image_path:
_A : List[str] = self._get_latent(_a )
else:
_A : Optional[int] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(_a , _a , _a )
assert pos_prompts, "You must provide at least one positive prompt."
_A : Optional[Any] = self.process_prompts(_a )
_A : Tuple = self.process_prompts(_a )
if save_final and save_path is None:
_A : List[str] = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(_a ):
os.makedirs(_a )
else:
_A : Optional[int] = save_path + """_""" + get_timestamp()
os.makedirs(_a )
_A : Any = save_path
_A : Any = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(_a ) )
_A : Dict = loop_post_process(_a )
for iter, transformed_img in enumerate(self._optimize_CLIP(_a , _a , _a ) ):
if show_intermediate:
show_pil(_a )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(_a )} )
if show_final:
show_pil(_a )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
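# --- Illustrative sketch (not part of the original file) ---
# process_prompts above accepts "|"-separated prompts with an optional
# ":weight" suffix and builds parallel prompt/weight lists. A standalone
# rendering of that parsing rule (names are illustrative only):
def _parse_prompts(prompts: str):
    texts, weights = [], []
    for prompt in (p.strip() for p in prompts.split("|")):
        if ":" in prompt:
            text, weight = prompt.split(":")
            texts.append(text)
            weights.append(float(weight))
        else:
            texts.append(prompt)
            weights.append(1.0)  # unweighted prompts default to 1.0
    return texts, weights

assert _parse_prompts("a happy face:2|a sad face:0.5") == (
    ["a happy face", "a sad face"],
    [2.0, 0.5],
)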
| 366 |
from __future__ import annotations
from collections.abc import Callable
_snake_case = list[list[float | int]]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )]
_A : int
_A : int
_A : int
_A : int
_A : int
_A : float
for row in range(snake_case_ ):
for col in range(snake_case_ ):
_A : Dict = matrix[row][col]
_A : List[Any] = vector[row][0]
_A : List[Any] = 0
_A : Optional[Any] = 0
while row < size and col < size:
# pivoting
_A : Any = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_,snake_case_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_A , _A : Optional[Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1,snake_case_ ):
_A : str = augmented[rowa][col] / augmented[row][col]
_A : List[Any] = 0
for cola in range(col + 1,size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1,snake_case_ ):
for row in range(snake_case_ ):
_A : int = augmented[row][col] / augmented[col][col]
for cola in range(snake_case_,size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row],10 )] for row in range(snake_case_ )
]
def lowerCAmelCase_ ( snake_case_ ):
_A : int = len(snake_case_ )
_A : Matrix = [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
_A : Matrix = [[0] for _ in range(snake_case_ )]
_A : Matrix
_A : int
_A : int
_A : int
for x_val, y_val in enumerate(snake_case_ ):
for col in range(snake_case_ ):
_A : str = (x_val + 1) ** (size - col - 1)
_A : List[str] = y_val
_A : Any = solve(snake_case_,snake_case_ )
def interpolated_func(snake_case_ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case_ ) )
return interpolated_func
def lowerCAmelCase_ ( snake_case_ ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCAmelCase_ ( snake_case_ = question_function,snake_case_ = 10 ):
_A : list[int] = [func(snake_case_ ) for x_val in range(1,order + 1 )]
_A : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1,order + 1 )
]
_A : int = 0
_A : Callable[[int], int]
_A : int
for poly in polynomials:
_A : Optional[int] = 1
while func(snake_case_ ) == poly(snake_case_ ):
x_val += 1
ret += poly(snake_case_ )
return ret
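# --- Illustrative worked example (not part of the original file) ---
# From the Project Euler 101 statement: for u(n) = n**3 the optimum
# polynomial through the first two terms is the line OP(2, n) = 7n - 6,
# so its first incorrect term is OP(2, 3) = 15, while u(3) = 27. The
# solution above sums one such first-incorrect-term per truncation order.
_u = lambda n: n**3
_slope = _u(2) - _u(1)            # 8 - 1 = 7
_intercept = _u(1) - _slope * 1   # 1 - 7 = -6
assert _slope * 3 + _intercept == 15 and _u(3) == 27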
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 0 |
def lowerCAmelCase_ ( snake_case_,snake_case_ = " " ):
_A : List[Any] = []
_A : Optional[Any] = 0
for index, char in enumerate(snake_case_ ):
if char == separator:
split_words.append(string[last_index:index] )
_A : str = index + 1
elif index + 1 == len(snake_case_ ):
split_words.append(string[last_index : index + 1] )
return split_words
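# --- Illustrative usage (not part of the original file) ---
# The function above mirrors str.split with a single-character separator:
assert lowerCAmelCase_("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
assert lowerCAmelCase_("Hello there") == ["Hello", "there"]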
if __name__ == "__main__":
from doctest import testmod
testmod()
| 367 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
_snake_case = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def lowerCAmelCase_ ( snake_case_ = "mumbai" ):
_A : Optional[Any] = BeautifulSoup(requests.get(url + location ).content,"""html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""",attrs={"""data-tn-component""": """organicJob"""} ):
_A : Tuple = job.find("""a""",attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
_A : Optional[int] = job.find("""span""",{"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 343 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_snake_case = logging.get_logger(__name__)
_snake_case = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowercase ( UpperCamelCase__ ):
_a = "dpt"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=384 , _a=16 , _a=3 , _a=False , _a=True , _a=[2, 5, 8, 11] , _a="project" , _a=[4, 2, 1, 0.5] , _a=[96, 192, 384, 768] , _a=256 , _a=-1 , _a=False , _a=True , _a=0.4 , _a=255 , _a=0.1 , _a=[1, 1024, 24, 24] , _a=[0, 1] , _a=None , **_a , ) -> Dict:
super().__init__(**_a )
_A : List[str] = hidden_size
_A : Tuple = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
_A : List[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
_A : str = BitConfig(**_a )
elif isinstance(_a , _a ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
_A : Union[str, Any] = BitConfig(**_a )
elif isinstance(_a , _a ):
_A : Optional[Any] = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
_A : int = backbone_featmap_shape
_A : List[Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
_A : Tuple = None
_A : Optional[int] = None
_A : Any = []
_A : Union[str, Any] = num_hidden_layers
_A : Optional[int] = num_attention_heads
_A : Optional[int] = intermediate_size
_A : str = hidden_act
_A : List[str] = hidden_dropout_prob
_A : Union[str, Any] = attention_probs_dropout_prob
_A : Any = initializer_range
_A : Union[str, Any] = layer_norm_eps
_A : Any = image_size
_A : List[Any] = patch_size
_A : Tuple = num_channels
_A : int = qkv_bias
_A : str = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
_A : str = readout_type
_A : int = reassemble_factors
_A : Optional[int] = neck_hidden_sizes
_A : Tuple = fusion_hidden_size
_A : Optional[int] = head_in_index
_A : int = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_A : str = use_auxiliary_head
_A : Optional[int] = auxiliary_loss_weight
_A : Optional[int] = semantic_loss_ignore_index
_A : Dict = semantic_classifier_dropout
def a__ ( self ) -> Optional[Any]:
_A : str = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_A : List[Any] = self.backbone_config.to_dict()
_A : str = self.__class__.model_type
return output
| 368 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCAmelCase_ ( matrix ):
    d = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
        determinant = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
elif (
        len(matrix ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_A : List[str] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
_A : List[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_A : Union[str, Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_A : Optional[Any] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_A : Any = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_A : List[Any] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_A : int = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_A : Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_A : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_A : List[str] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_A : Optional[int] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_A : List[Any] = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
_A : List[str] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_A : Union[str, Any] = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(snake_case_ )
# Calculate the inverse of the matrix
return [[float(d(snake_case_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 343 | 0 |
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays( numsa,numsb ):
    all_numbers = sorted(numsa + numsb )
    div, mod = divmod(len(all_numbers ),2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
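# --- Illustrative usage (not part of the original file) ---
# Odd combined length returns the middle element; even length averages the
# two middle elements.
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5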
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = [float(x) for x in input("Enter the elements of first array: ").split()]
_snake_case = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 369 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
@register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors used in the model, indexed by their weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor to use: one instance for all layers, or a dict keyed by layer name."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        """Disable any custom attention processors and fall back to the default implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
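# Minimal forward-pass sketch (a hedged addition; the tiny sizes are illustrative and
# the keyword names follow the restored signature above):
if __name__ == "__main__":
    prior = PriorTransformer(
        num_attention_heads=2,
        attention_head_dim=4,
        num_layers=2,
        embedding_dim=8,
        num_embeddings=7,
        additional_embeddings=4,
    )
    hidden_states = torch.randn(2, 8)             # noisy CLIP image embedding
    proj_embedding = torch.randn(2, 8)            # pooled text embedding
    encoder_hidden_states = torch.randn(2, 7, 8)  # per-token text states
    out = prior(hidden_states, 1, proj_embedding, encoder_hidden_states=encoder_hidden_states)
    print(out.predicted_image_embedding.shape)    # torch.Size([2, 8])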
| 343 | 0 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Pull the user profile dict out of Instagram's sharedData <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
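# Hedged sketch of the JSON shape extract_user_profile() relies on, taken from the key
# lookups above (everything else in the payload is omitted):
# {"config": {...},
#  "entry_data": {"ProfilePage": [{"graphql": {"user": {"username": ..., ...}}}]}}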
class InstagramUser:
    """Scrape public Instagram profile data for a given username."""

    def __init__(self, username: str) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the user info as a dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 370 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact: the API URL redirects to the real download URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests that hit it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
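# Shape of the mapping returned above (hedged illustration; real keys depend on CI logs):
# {"<error message>": {"count": 12, "failed_tests": [("<job link>", "<error line>"), ...]}, ...}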
def get_model(test):
    """Get the model name from a pytest test node id."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
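# Worked example of get_model() on a typical pytest node id (it follows directly from
# the split logic above; the test name itself is illustrative):
# >>> get_model("tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_x")
# 'albert'
# >>> get_model("tests/test_configuration_common.py::ConfigTester::test_x")  # not under tests/models/
# None is returned for this one.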
def reduce_by_model(logs, error_filter=None):
    """Count errors per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
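# Sample of the markdown emitted by make_github_table_per_model() (illustrative values):
# | model | no. of errors | major error | count |
# |-:|-:|-:|-:|
# | albert | 3 | OSError: Can't load config for ... | 2 |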
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    table_by_error = make_github_table(reduced_by_error)
    table_by_model = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(table_by_error)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(table_by_model)
| 343 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings over non-padding positions, then project.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
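# Hedged usage sketch: the checkpoint id and tokenizer pairing follow the upstream
# M-CLIP release and are assumptions, not taken from this file.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    model_name = "M-CLIP/XLM-Roberta-Large-Vit-B-32"  # assumed checkpoint id
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = MultilingualCLIP.from_pretrained(model_name)
    batch = tokenizer(["a cat", "una gata"], padding=True, return_tensors="pt")
    projected, embeddings = model(batch["input_ids"], batch["attention_mask"])
    print(projected.shape)  # (2, numDims)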
| 371 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 343 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''▁he''', '''ll''', '''o'''])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
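# Quick standalone check of the pretrained tokenizer exercised by the slow tests above
# (a hedged sketch; it needs network access to download "xlnet-base-cased"):
if __name__ == "__main__":
    tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    print(tok.tokenize("This is a test"))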
| 344 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, using a sieve:
    phi starts at n - 1 and is corrected once for each prime factor."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
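# Hedged doctest-style check: Project Euler 72 reports 21 reduced proper fractions
# for d <= 8, and indeed sum(phi(2..8)) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21.
# >>> solution(8)
# 21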
if __name__ == "__main__":
    print(solution())
| 344 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"
    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
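# Minimal usage sketch (a hedged addition; it only exercises the constructor above):
if __name__ == "__main__":
    config = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 6, 2])
    print(config.model_type, config.hidden_sizes)  # van [32, 64, 160, 256]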
| 344 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
    def test_step_shape(self):
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
    def test_timesteps(self):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
    def test_steps_offset(self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
    def test_betas(self):
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
    def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
    def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
    def test_time_indices(self):
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
    def test_inference_steps(self):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
    def test_pow_of_3_inference_steps(self):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
    def test_inference_plms_no_past_residuals(self):
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
    def test_full_loop_with_v_prediction(self):
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
    def test_full_loop_with_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
    def test_full_loop_with_no_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
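# Standalone denoising-loop sketch with the scheduler under test (a hedged addition;
# the random "model output" stands in for a real UNet prediction):
if __name__ == "__main__":
    sample = torch.randn(1, 3, 8, 8)
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    for t in scheduler.timesteps:
        model_output = torch.randn(1, 3, 8, 8)  # placeholder for a UNet forward pass
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])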
| 344 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
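# Hedged usage sketch: the checkpoint id and AutoProcessor-style pairing follow the
# upstream MusicGen release and are assumptions, not taken from this file.
if __name__ == "__main__":
    processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
    inputs = processor(
        text=["80s pop track with bassy drums"],
        audio=np.zeros(32000, dtype=np.float32),  # 1 s of silence at 32 kHz
        sampling_rate=32000,
        return_tensors="pt",
    )
    print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'input_values', ...]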
| 344 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class Audio:
    """Audio feature: stores raw bytes/paths and decodes to an array on access."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
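# Typical end-to-end use of the feature above via the public `datasets` API (a hedged
# sketch; "audio.wav" is a placeholder path):
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"audio": ["audio.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
    example = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}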
| 344 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 |
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
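# Behavior sketch (a hedged addition): with torch or scipy missing, instantiation
# raises an ImportError from requires_backends() that names the missing backends:
# >>> LMSDiscreteScheduler()  # doctest: +SKIP
# ImportError: ... requires the torch library ...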
| 344 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase_ = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCamelCase_ = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a config from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )
    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
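# Minimal usage sketch (a hedged addition; instantiating with no arguments builds the
# default Swin backbone declared above):
if __name__ == "__main__":
    config = Mask2FormerConfig()
    print(config.backbone_config.model_type, config.num_queries)  # swin 100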
| 344 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
def A ( __UpperCAmelCase ) -> Dict[str, np.ndarray]:
'''simple docstring'''
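    # NumPy convenience wrapper: lift the batch to torch tensors, build the
    # atom masks with the torch implementation above, then map back to NumPy.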
UpperCAmelCase_ = tree_map(lambda __UpperCAmelCase : torch.tensor(__UpperCAmelCase , device=batch['''aatype'''].device ) , __UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = tensor_tree_map(lambda __UpperCAmelCase : np.array(__UpperCAmelCase ) , make_atomaa_masks(__UpperCAmelCase ) )
return out
| 344 | 1 |
def A ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
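    # An isogram is a word with no repeating letters, e.g. "uncopyrightable".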
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
UpperCAmelCase_ = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
UpperCamelCase_ = input("Enter a string ").strip()
UpperCamelCase_ = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 344 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="openai/whisper-base"
UpperCamelCase__ : int =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
UpperCamelCase__ : Any ="transcriber"
UpperCamelCase__ : Optional[int] =WhisperProcessor
UpperCamelCase__ : List[str] =WhisperForConditionalGeneration
UpperCamelCase__ : List[Any] =["audio"]
UpperCamelCase__ : Union[str, Any] =["text"]
def __a ( self :int , _lowercase :Any) -> Tuple:
return self.pre_processor(_lowercase , return_tensors='''pt''').input_features
def __a ( self :Dict , _lowercase :Tuple) -> Any:
return self.model.generate(inputs=_lowercase)
def __a ( self :int , _lowercase :Union[str, Any]) -> Optional[Any]:
return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=_lowercase)[0]
| 344 | 1 |
def A ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
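    # A perfect number equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3.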
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
UpperCamelCase_ = int(input("Enter number: ").strip())
print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 344 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class a_ ( _snake_case ):
def __lt__( self :Any , _lowercase :List[Any]) -> List[Any]:
return self[-1] < other[-1]
def __eq__( self :Any , _lowercase :List[Any]) -> Optional[Any]:
return self[-1] == other[-1]
def A ( __UpperCAmelCase ) -> list:
'''simple docstring'''
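    # Patience sort: deal each element onto the leftmost pile whose top is >= it
    # (found with bisect). Piles stay non-increasing, so reversing each yields a
    # sorted run, and heapq.merge combines the runs efficiently.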
UpperCAmelCase_ = []
# sort into stacks
for element in collection:
UpperCAmelCase_ = Stack([element] )
UpperCAmelCase_ = bisect_left(__UpperCAmelCase , __UpperCAmelCase )
if i != len(__UpperCAmelCase ):
stacks[i].append(__UpperCAmelCase )
else:
stacks.append(__UpperCAmelCase )
# use a heap-based merge to merge stack efficiently
UpperCAmelCase_ = merge(*(reversed(__UpperCAmelCase ) for stack in stacks) )
return collection
if __name__ == "__main__":
UpperCamelCase_ = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase_ = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
| 344 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : str =BigBirdTokenizer
UpperCamelCase__ : Tuple =BigBirdTokenizerFast
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : List[str] =True
def __a ( self :Any) -> List[str]:
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_lowercase , keep_accents=_lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __a ( self :Any) -> List[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 344 | 1 |
def A ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
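    # Stooge sort: recursively sort the first 2/3, the last 2/3, then the first
    # 2/3 again. Correct but very slow, roughly O(n^2.71).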
stooge(__UpperCAmelCase , 0 , len(__UpperCAmelCase ) - 1 )
return arr
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
UpperCAmelCase_ , UpperCAmelCase_ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        UpperCAmelCase_ = (h - i + 1) // 3
# Recursively sort first 2/3 elements
stooge(__UpperCAmelCase , __UpperCAmelCase , (h - t) )
# Recursively sort last 2/3 elements
stooge(__UpperCAmelCase , i + t , (__UpperCAmelCase) )
# Recursively sort first 2/3 elements
stooge(__UpperCAmelCase , __UpperCAmelCase , (h - t) )
if __name__ == "__main__":
UpperCamelCase_ = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase_ = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 344 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase_ = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase_ = {ord(char) for char in VALID_CHARS}
UpperCamelCase_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> str | None:
'''simple docstring'''
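    # XOR the ciphertext against the 3-character key, cycled over the message;
    # bail out as soon as a decoded byte falls outside the printable set.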
UpperCAmelCase_ = ""
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
for keychar, cipherchar in zip(cycle(__UpperCAmelCase ) , __UpperCAmelCase ):
UpperCAmelCase_ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__UpperCAmelCase )
return decoded
def A ( __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
UpperCAmelCase_ = []
for key in product(__UpperCAmelCase , repeat=3 ):
UpperCAmelCase_ = try_key(__UpperCAmelCase , __UpperCAmelCase )
if encoded is not None:
possibles.append(__UpperCAmelCase )
return possibles
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def A ( __UpperCAmelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
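    # Project Euler 59: brute-force every 3-letter lowercase key, keep XOR
    # decryptions made of printable characters, then narrow the candidates by
    # requiring common English words until a single plaintext remains.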
UpperCAmelCase_ = Path(__UpperCAmelCase ).parent.joinpath(__UpperCAmelCase ).read_text(encoding='''utf-8''' )
UpperCAmelCase_ = [int(__UpperCAmelCase ) for number in data.strip().split(''',''' )]
UpperCAmelCase_ = filter_valid_chars(__UpperCAmelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase_ = filter_common_word(__UpperCAmelCase , __UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
break
UpperCAmelCase_ = possibles[0]
return sum(ord(__UpperCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
| 344 | 1 |
UpperCamelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
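    # Standard BFS on the residual graph: record each node's parent so the
    # augmenting path can be walked back from sink to source, and report
    # whether the sink is reachable.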
UpperCAmelCase_ = [False] * len(__UpperCAmelCase )
UpperCAmelCase_ = [s]
UpperCAmelCase_ = True
while queue:
UpperCAmelCase_ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__UpperCAmelCase )
UpperCAmelCase_ = True
UpperCAmelCase_ = u
return visited[t]
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
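    # Ford-Fulkerson with BFS path search (Edmonds-Karp): push the bottleneck
    # flow along each augmenting path and update residual capacities until the
    # sink becomes unreachable.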
UpperCAmelCase_ = [-1] * (len(__UpperCAmelCase ))
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
    UpperCAmelCase_ = [i[:] for i in graph]  # Keep a copy of the original graph to find saturated edges later.
while bfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_ = float('''Inf''' )
UpperCAmelCase_ = sink
while s != source:
# Find the minimum value in select path
UpperCAmelCase_ = min(__UpperCAmelCase , graph[parent[s]][s] )
UpperCAmelCase_ = parent[s]
max_flow += path_flow
UpperCAmelCase_ = sink
while v != source:
UpperCAmelCase_ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCAmelCase_ = parent[v]
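    # Edges that had capacity in the original graph but are saturated (zero
    # residual capacity) after max flow form the minimum cut.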
for i in range(len(__UpperCAmelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 344 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def A ( ) -> str:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def A ( ) -> Any:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = dataset_loading_script_name
UpperCAmelCase_ = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=__UpperCAmelCase )
UpperCAmelCase_ = script_dir / f"{script_name}.py"
with open(__UpperCAmelCase , '''w''' ) as f:
f.write(__UpperCAmelCase )
return str(__UpperCAmelCase )
| 344 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="open-llama"
def __init__( self :Union[str, Any] , _lowercase :List[Any]=100000 , _lowercase :Dict=4096 , _lowercase :List[Any]=11008 , _lowercase :Optional[int]=32 , _lowercase :Union[str, Any]=32 , _lowercase :List[str]="silu" , _lowercase :Union[str, Any]=2048 , _lowercase :Any=0.02 , _lowercase :Optional[Any]=1E-6 , _lowercase :str=True , _lowercase :str=0 , _lowercase :Any=1 , _lowercase :Optional[Any]=2 , _lowercase :str=False , _lowercase :Dict=True , _lowercase :Optional[Any]=0.1 , _lowercase :Tuple=0.1 , _lowercase :Dict=True , _lowercase :List[Any]=True , _lowercase :Dict=None , **_lowercase :Optional[int] , ) -> List[Any]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = kwargs.pop(
'''use_memorry_efficient_attention''' , _lowercase)
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_dropout_prob
UpperCAmelCase_ = use_stable_embedding
UpperCAmelCase_ = shared_input_output_embedding
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase , )
def __a ( self :int) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowercase) or len(self.rope_scaling) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}")
UpperCAmelCase_ = self.rope_scaling.get('''type''' , _lowercase)
UpperCAmelCase_ = self.rope_scaling.get('''factor''' , _lowercase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(_lowercase , _lowercase) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 344 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class a_ ( _snake_case ):
UpperCamelCase__ : Any ="Wav2Vec2FeatureExtractor"
UpperCamelCase__ : Optional[Any] ="AutoTokenizer"
def __init__( self :Dict , _lowercase :Any , _lowercase :Optional[Any]) -> Any:
super().__init__(_lowercase , _lowercase)
UpperCAmelCase_ = self.feature_extractor
UpperCAmelCase_ = False
@classmethod
def __a ( cls :List[str] , _lowercase :str , **_lowercase :List[str]) -> List[Any]:
try:
return super().from_pretrained(_lowercase , **_lowercase)
except OSError:
warnings.warn(
f"Loading a tokenizer inside {cls.__name__} from a config that does not"
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , _lowercase , )
UpperCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained(_lowercase , **_lowercase)
UpperCAmelCase_ = WavaVecaCTCTokenizer.from_pretrained(_lowercase , **_lowercase)
return cls(feature_extractor=_lowercase , tokenizer=_lowercase)
def __call__( self :Tuple , *_lowercase :Optional[Any] , **_lowercase :Optional[int]) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase)
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''')
UpperCAmelCase_ = kwargs.pop('''raw_speech''')
else:
UpperCAmelCase_ = kwargs.pop('''audio''' , _lowercase)
UpperCAmelCase_ = kwargs.pop('''sampling_rate''' , _lowercase)
UpperCAmelCase_ = kwargs.pop('''text''' , _lowercase)
if len(_lowercase) > 0:
UpperCAmelCase_ = args[0]
UpperCAmelCase_ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''')
if audio is not None:
UpperCAmelCase_ = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase)
if text is not None:
UpperCAmelCase_ = self.tokenizer(_lowercase , **_lowercase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase_ = encodings['''input_ids''']
return inputs
def __a ( self :int , *_lowercase :Tuple , **_lowercase :int) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_lowercase , **_lowercase)
UpperCAmelCase_ = kwargs.pop('''input_features''' , _lowercase)
UpperCAmelCase_ = kwargs.pop('''labels''' , _lowercase)
if len(_lowercase) > 0:
UpperCAmelCase_ = args[0]
UpperCAmelCase_ = args[1:]
if input_features is not None:
UpperCAmelCase_ = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase)
if labels is not None:
UpperCAmelCase_ = self.tokenizer.pad(_lowercase , **_lowercase)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
UpperCAmelCase_ = labels['''input_ids''']
return input_features
def __a ( self :Union[str, Any] , *_lowercase :Optional[int] , **_lowercase :List[Any]) -> Tuple:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase)
def __a ( self :Dict , *_lowercase :int , **_lowercase :Optional[Any]) -> Any:
return self.tokenizer.decode(*_lowercase , **_lowercase)
@contextmanager
def __a ( self :Any) -> Tuple:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''')
UpperCAmelCase_ = True
UpperCAmelCase_ = self.tokenizer
yield
UpperCAmelCase_ = self.feature_extractor
UpperCAmelCase_ = False
| 344 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] =(DPMSolverSinglestepScheduler,)
UpperCamelCase__ : Tuple =(("num_inference_steps", 25),)
def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int:
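        # Baseline DPMSolverSinglestepScheduler config shared by the tests;
        # individual tests override fields through **kwargs.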
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_lowercase)
return config
def __a ( self :Union[str, Any] , _lowercase :List[Any]=0 , **_lowercase :Optional[int]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
def __a ( self :Optional[Any] , _lowercase :str=0 , **_lowercase :Union[str, Any]) -> Dict:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Dict , _lowercase :Union[str, Any]=None , **_lowercase :List[Any]) -> int:
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
assert sample.dtype == torch.floataa
| 344 | 1 |
UpperCamelCase_ = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_58_18,
}
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float:
'''simple docstring'''
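    # All units are expressed relative to joules, so conversion is simply
    # value * (from-unit in J) / (to-unit in J).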
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
UpperCAmelCase_ = (
f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
f"Valid values are: {', '.join(__UpperCAmelCase )}"
)
raise ValueError(__UpperCAmelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a_ ( nn.Module ):
def __init__( self :Optional[Any]) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :Dict , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( _snake_case ):
def __a ( self :Tuple , _lowercase :Optional[int] , *_lowercase :Union[str, Any] , **_lowercase :Any) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class a_ ( _snake_case ):
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :Tuple) -> int:
return output + 1
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
| 344 | 1 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class a_ ( _snake_case ):
def __init__( self :int , _lowercase :Dict , _lowercase :List[str]=None , _lowercase :Any=True , _lowercase :Tuple=None , **_lowercase :List[Any]) -> Tuple:
UpperCAmelCase_ = parent
UpperCAmelCase_ = config_class
UpperCAmelCase_ = has_text_modality
UpperCAmelCase_ = kwargs
UpperCAmelCase_ = common_properties
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = self.config_class(**self.inputs_dict)
UpperCAmelCase_ = (
['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['''vocab_size'''])
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowercase , _lowercase) , msg=f"`{prop}` does not exist")
# Test that config has the common properties as setter
for idx, name in enumerate(_lowercase):
try:
setattr(_lowercase , _lowercase , _lowercase)
self.parent.assertEqual(
getattr(_lowercase , _lowercase) , _lowercase , msg=f"`{name} value {idx} expected, but was {getattr(_lowercase , _lowercase)}")
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowercase):
try:
UpperCAmelCase_ = self.config_class(**{name: idx})
self.parent.assertEqual(
getattr(_lowercase , _lowercase) , _lowercase , msg=f"`{name} value {idx} expected, but was {getattr(_lowercase , _lowercase)}")
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)

        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 344 |
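# A minimal usage sketch of the round-trip behaviors that the ConfigTester
# above exercises. BertConfig is used purely for illustration (an assumption,
# not the class under test in any given model suite).
import json
import tempfile

from transformers import BertConfig

config = BertConfig(hidden_size=128, num_attention_heads=4, num_hidden_layers=2)
obj = json.loads(config.to_json_string())
assert obj["hidden_size"] == 128  # to_json_string serializes every field

with tempfile.TemporaryDirectory() as tmpdirname:
    config.save_pretrained(tmpdirname)
    reloaded = BertConfig.from_pretrained(tmpdirname)
assert reloaded.to_dict() == config.to_dict()  # save/load must round-trip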
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
UpperCAmelCase_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
UpperCAmelCase_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase)
processor.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , **_lowercase :Dict) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).tokenizer
def __a ( self :Optional[Any] , **_lowercase :Optional[Any]) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).image_processor
def __a ( self :Dict , **_lowercase :Tuple) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).qformer_tokenizer
def __a ( self :Optional[int]) -> str:
shutil.rmtree(self.tmpdirname)
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
self.assertIsInstance(processor.qformer_tokenizer , _lowercase)
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase)
UpperCAmelCase_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 344 | 1 |
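# Usage sketch mirroring the fixtures above (note this dump mangles some
# identifiers: `GPTaTokenizer` is transformers' GPT2Tokenizer and `np.uinta`
# is np.uint8). An InstructBLIP processor bundles a language tokenizer, an
# image processor and a separate Q-Former tokenizer:
import numpy as np
from PIL import Image
from transformers import BertTokenizerFast, BlipImageProcessor, GPT2Tokenizer, InstructBlipProcessor

processor = InstructBlipProcessor(
    image_processor=BlipImageProcessor(),
    tokenizer=GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"),
    qformer_tokenizer=BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert"),
)
image = Image.fromarray(np.random.randint(255, size=(30, 40, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image)
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']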
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing `term`, total documents); documents in
    `corpus` are separated by newlines."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return round(log10(n / df), 3), or the smoothed variant
    round(1 + log10(n / (1 + df)), 3)."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine a term frequency with an inverse document frequency score."""
    return round(tf * idf, 3)
| 344 |
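# Worked example for the functions above; the numbers follow directly from
# the definitions. With a 3-document corpus in which "cat" occurs in one
# document: df = 1, n = 3, idf = round(log10(3 / 1), 3) = 0.477.
corpus = "the cat sat\na dog barked\nthe sun rose"
tf = term_frequency("cat", "the cat sat")  # -> 1
df, n = document_frequency("cat", corpus)  # -> (1, 3)
idf = inverse_document_frequency(df, n)    # -> 0.477
assert tf_idf(tf, idf) == 0.477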
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] ="levit"
def __init__( self :List[str] , _lowercase :List[Any]=224 , _lowercase :str=3 , _lowercase :Optional[int]=3 , _lowercase :str=2 , _lowercase :List[Any]=1 , _lowercase :str=16 , _lowercase :Dict=[128, 256, 384] , _lowercase :Union[str, Any]=[4, 8, 12] , _lowercase :Tuple=[4, 4, 4] , _lowercase :Dict=[16, 16, 16] , _lowercase :Any=0 , _lowercase :Dict=[2, 2, 2] , _lowercase :Any=[2, 2, 2] , _lowercase :Tuple=0.02 , **_lowercase :Union[str, Any] , ) -> Optional[Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] =version.parse("1.11" )
@property
def __a ( self :Any) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def __a ( self :List[Any]) -> float:
return 1E-4
| 344 | 1 |
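# Brief usage sketch: the first class above corresponds to transformers'
# LevitConfig (`a_`, `_snake_case` and `UpperCamelCase__` are artifacts of
# this dump). The default values come straight from the signature:
from transformers import LevitConfig

config = LevitConfig()
assert config.model_type == "levit"
assert config.hidden_sizes == [128, 256, 384]
assert config.num_attention_heads == [4, 8, 12]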
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =GPTSwaTokenizer
UpperCamelCase__ : Optional[int] =False
UpperCamelCase__ : Optional[int] =True
UpperCamelCase__ : Tuple =False
def __a ( self :str) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = GPTSwaTokenizer(_lowercase , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , _lowercase :int) -> str:
UpperCAmelCase_ = '''This is a test'''
UpperCAmelCase_ = '''This is a test'''
return input_text, output_text
def __a ( self :Optional[Any]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :int) -> int:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(_lowercase) , 2000)
def __a ( self :Union[str, Any]) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 2000)
def __a ( self :Dict) -> str:
UpperCAmelCase_ = GPTSwaTokenizer(_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase) , [465, 287, 265, 631, 842])
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
# fmt: off
self.assertListEqual(
_lowercase , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
# fmt: off
self.assertListEqual(
_lowercase , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
# fmt: on
def __a ( self :str) -> Any:
UpperCAmelCase_ = GPTSwaTokenizer(_lowercase)
UpperCAmelCase_ = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
UpperCAmelCase_ = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_lowercase , _lowercase):
self.assertListEqual(tokenizer.encode_fast(_lowercase) , _lowercase)
# Test that decode_fast returns the input text
for text, token_ids in zip(_lowercase , _lowercase):
self.assertEqual(tokenizer.decode_fast(_lowercase) , _lowercase)
@slow
def __a ( self :Optional[Any]) -> Optional[int]:
UpperCAmelCase_ = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=_lowercase , )
| 344 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch `function` from a notebook, on TPU cores, multiple GPUs, one GPU/MPS or CPU."""
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"  # allow CPU fallback for unsupported MPS ops
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Launch `function` on CPU with `num_processes` processes, for debugging distributed code."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 344 | 1 |
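# Usage sketch for notebook_launcher above (it mirrors the public
# `from accelerate import notebook_launcher` API): the training function
# builds its own Accelerator, and no cell may touch CUDA beforehand.
# `num_processes=2` assumes a 2-GPU machine and is illustrative only.
def training_function(learning_rate):
    from accelerate import Accelerator

    accelerator = Accelerator()
    accelerator.print(f"training on {accelerator.num_processes} processes, lr={learning_rate}")


notebook_launcher(training_function, args=(3e-4,), num_processes=2)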
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def A ( ) -> Any:
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class a_ ( nn.Module ):
def __init__( self :Optional[int]) -> Optional[int]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :List[str] , _lowercase :Optional[int]) -> List[str]:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( unittest.TestCase ):
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(_lowercase :Union[str, Any]):
nonlocal batch_sizes
batch_sizes.append(_lowercase)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(_lowercase , [128, 64, 32, 16, 8])
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(_lowercase :List[str] , _lowercase :Optional[int]):
nonlocal batch_sizes
batch_sizes.append(_lowercase)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCAmelCase_ , UpperCAmelCase_ = mock_training_loop_function('''hello''')
self.assertListEqual(_lowercase , [128, 64, 32, 16, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def __a ( self :Optional[Any]) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(_lowercase :List[str]):
pass
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def __a ( self :Dict) -> Any:
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(_lowercase :Dict):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def __a ( self :Optional[Any]) -> Tuple:
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(_lowercase :str , _lowercase :List[Any] , _lowercase :Optional[int]):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def __a ( self :Optional[Any]) -> Dict:
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(_lowercase :str):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def __a ( self :Union[str, Any]) -> Any:
UpperCAmelCase_ = torch.cuda.memory_allocated()
UpperCAmelCase_ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , _lowercase)
UpperCAmelCase_ = release_memory(_lowercase)
self.assertEqual(torch.cuda.memory_allocated() , _lowercase)
| 344 |
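# A small sketch of the retry pattern tested above: the decorator injects
# `batch_size` as the first argument and halves it whenever the wrapped
# function raises a (real or simulated) CUDA out-of-memory RuntimeError.
from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=64)
def train(batch_size, model_name):
    if batch_size > 16:  # stand-in for an actual CUDA OOM at large batches
        raise RuntimeError("CUDA out of memory.")
    return batch_size, model_name


assert train("tiny-model") == (16, "tiny-model")  # called WITHOUT batch_size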
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( _snake_case ):
def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any) -> int:
self.run_seqaseq_quick(distributed=_lowercase)
@require_torch_multi_gpu
def __a ( self :int) -> Any:
self.run_seqaseq_quick(distributed=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def __a ( self :str , _lowercase :Any) -> List[str]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''])
UpperCAmelCase_ = len(re.findall(_lowercase , cl.err))
self.assertEqual(_lowercase , data['''n_matches'''])
@slow
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(_lowercase)
UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :List[str]) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowercase :str) -> Tuple[int, float]:
UpperCAmelCase_ = '''--skip_memory_metrics 0'''
UpperCAmelCase_ = self.run_trainer(
max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
UpperCAmelCase_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
UpperCAmelCase_ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
_lowercase , _lowercase , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def __a ( self :Any , _lowercase :int , _lowercase :str , _lowercase :int , _lowercase :float = 3E-3 , _lowercase :str = "adafactor" , _lowercase :bool = False , _lowercase :str = None , _lowercase :int = 0 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :int = None , ) -> List[Any]:
UpperCAmelCase_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowercase)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowercase)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
UpperCAmelCase_ = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowercase)}\n ".split()
UpperCAmelCase_ = '''
--do_predict
'''.split()
UpperCAmelCase_ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase_ = get_gpu_count()
UpperCAmelCase_ = get_torch_dist_unique_port()
UpperCAmelCase_ = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
UpperCAmelCase_ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowercase , env=self.get_env())
else:
UpperCAmelCase_ = ['''run_translation.py'''] + args
with patch.object(_lowercase , '''argv''' , _lowercase):
main()
return output_dir
| 344 | 1 |
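# The expected-savings arithmetic from the long comment in the BNB test
# above, written out (25M is the approximate count of quantizable, i.e.
# non-embedding, parameters):
quantizable_params = 25_000_000
adamw_optim_mb = quantizable_params * 8 / 10**6  # 8 bytes per param -> ~200MB
bnb_optim_mb = quantizable_params * 2 / 10**6    # 2 bytes per param ->  ~50MB
print(f"expected optimizer-state saving: ~{adamw_optim_mb - bnb_optim_mb:.0f}MB")  # ~150MB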
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class a_ ( unittest.TestCase ):
@property
def __a ( self :Optional[int]) -> Optional[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __a ( self :Union[str, Any]) -> str:
UpperCAmelCase_ = ort.SessionOptions()
UpperCAmelCase_ = False
return options
def __a ( self :Tuple) -> Union[str, Any]:
        UpperCAmelCase_ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''')
        UpperCAmelCase_ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''')
        UpperCAmelCase_ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''')
# using the PNDM scheduler by default
UpperCAmelCase_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = '''A red cat sitting on a park bench'''
UpperCAmelCase_ = np.random.RandomState(0)
UpperCAmelCase_ = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_lowercase , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 344 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) edit distance between `word1` and `word2`."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 1 |
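# Quick checks for min_distance_up_bottom above, including the classic
# "intention" -> "execution" pair, which needs 5 edits:
assert min_distance_up_bottom("intention", "execution") == 5
assert min_distance_up_bottom("", "abc") == 3  # three insertions
assert min_distance_up_bottom("same", "same") == 0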
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan over array[left:right]; returns the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; returns an index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over array[left:right]; returns an index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 344 |
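# Non-interactive example for the searches above; with the default
# precision of 10, a 7-element array falls straight through to lin_search.
data = [1, 3, 5, 7, 9, 11, 13]
assert ite_ternary_search(data, 9) == 4
assert rec_ternary_search(0, len(data) - 1, data, 9) == 4
assert ite_ternary_search(data, 2) == -1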
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
UpperCamelCase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Any ="left"
def __init__( self :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=False , _lowercase :Tuple="<s>" , _lowercase :Any="</s>" , _lowercase :Dict="<unk>" , _lowercase :str="<sep>" , _lowercase :Tuple="<pad>" , _lowercase :Any="<cls>" , _lowercase :List[str]="<mask>" , _lowercase :Union[str, Any]=["<eop>", "<eod>"] , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Union[str, Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
UpperCAmelCase_ = 3
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
@property
def __a ( self :int) -> List[Any]:
return len(self.sp_model)
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Optional[Any]) -> List[Any]:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __a ( self :List[str] , _lowercase :Tuple) -> Optional[int]:
if self.remove_space:
UpperCAmelCase_ = ''' '''.join(inputs.strip().split())
else:
UpperCAmelCase_ = inputs
UpperCAmelCase_ = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCAmelCase_ = unicodedata.normalize('''NFKD''' , _lowercase)
UpperCAmelCase_ = ''''''.join([c for c in outputs if not unicodedata.combining(_lowercase)])
if self.do_lower_case:
UpperCAmelCase_ = outputs.lower()
return outputs
def __a ( self :str , _lowercase :str) -> List[str]:
UpperCAmelCase_ = self.preprocess_text(_lowercase)
UpperCAmelCase_ = self.sp_model.encode(_lowercase , out_type=_lowercase)
UpperCAmelCase_ = []
for piece in pieces:
if len(_lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCAmelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCAmelCase_ = cur_pieces[1:]
else:
UpperCAmelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowercase)
else:
new_pieces.append(_lowercase)
return new_pieces
def __a ( self :Optional[Any] , _lowercase :Union[str, Any]) -> Tuple:
return self.sp_model.PieceToId(_lowercase)
def __a ( self :Optional[int] , _lowercase :Optional[Any]) -> List[str]:
return self.sp_model.IdToPiece(_lowercase)
def __a ( self :List[Any] , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = ''''''.join(_lowercase).replace(_lowercase , ''' ''').strip()
return out_string
def __a ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :bool = False , _lowercase :bool = None , _lowercase :bool = True , **_lowercase :Tuple , ) -> str:
UpperCAmelCase_ = kwargs.pop('''use_source_tokenizer''' , _lowercase)
UpperCAmelCase_ = self.convert_ids_to_tokens(_lowercase , skip_special_tokens=_lowercase)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
UpperCAmelCase_ = []
sub_texts.append(_lowercase)
else:
current_sub_text.append(_lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCAmelCase_ = ''''''.join(_lowercase)
UpperCAmelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ = self.clean_up_tokenization(_lowercase)
return clean_text
else:
return text
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 344 | 1 |
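# Layout produced by the three special-token methods above for a sequence
# pair (A, B) — note that XLNet, unlike BERT, puts the classification token
# at the END:
#   input ids:       A + [sep] + B + [sep] + [cls]
#   token type ids:  [0]*(len(A)+1) + [1]*(len(B)+1) + [2]
#   special mask:    [0]*len(A) + [1] + [0]*len(B) + [1, 1]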
def _print_dist(dist, v):
    """Pretty-print the distance matrix, using INF for unreachable pairs."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths for a `v`-vertex weighted graph given as a
    2D adjacency matrix; returns (distance matrix, v)."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 344 |
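# Non-interactive example for floyd_warshall above: a directed 3-vertex
# graph where the path 0 -> 1 -> 2 (4 + 1 = 5) beats the direct edge 0 -> 2 (7).
INF = float("inf")
g = [[0.0, 4.0, 7.0], [INF, 0.0, 1.0], [INF, INF, 0.0]]
dist, _ = floyd_warshall(g, 3)  # also prints the matrix
assert dist[0][2] == 5.0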
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case , _snake_case ):
UpperCamelCase__ : Union[str, Any] ="maskformer-swin"
UpperCamelCase__ : List[str] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :Union[str, Any] , _lowercase :Optional[int]=224 , _lowercase :List[str]=4 , _lowercase :Tuple=3 , _lowercase :List[Any]=96 , _lowercase :Any=[2, 2, 6, 2] , _lowercase :int=[3, 6, 12, 24] , _lowercase :List[Any]=7 , _lowercase :Dict=4.0 , _lowercase :Any=True , _lowercase :int=0.0 , _lowercase :List[Any]=0.0 , _lowercase :Tuple=0.1 , _lowercase :str="gelu" , _lowercase :Union[str, Any]=False , _lowercase :Tuple=0.02 , _lowercase :List[str]=1E-5 , _lowercase :List[str]=None , _lowercase :Any=None , **_lowercase :Any , ) -> Union[str, Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(_lowercase) - 1))
UpperCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_lowercase) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names)
| 344 | 1 |
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 86: return the least cuboid size M such that more than
    `limit` cuboids with sides 1 <= a, b, c <= M have an integer-length
    shortest surface path between opposite vertices.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
if __name__ == "__main__":
print(f"{solution() = }")
| 344 |
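# Sanity check from the Project Euler 86 statement: M = 100 is the least
# cuboid size for which the count first exceeds 2000 (it is 1975 at M = 99);
# the default limit of one million yields the published answer, 1818.
assert solution(1975) == 100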
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase_ = False
UpperCamelCase_ = False
def A ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
return TrainCommand(__UpperCAmelCase )
class a_ ( _snake_case ):
@staticmethod
def __a ( _lowercase :ArgumentParser) -> List[Any]:
UpperCAmelCase_ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
train_parser.add_argument(
'''--train_data''' , type=_lowercase , required=_lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''')
train_parser.add_argument(
'''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''')
train_parser.add_argument(
'''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''')
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''')
train_parser.add_argument(
'''--validation_split''' , type=_lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path to saved the trained model.''')
train_parser.add_argument(
'''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''')
train_parser.add_argument(
'''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=32 , help='''Batch size for training.''')
train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=64 , help='''Batch size for validation.''')
train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3E-5 , help='''Learning rate.''')
train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1E-0_8 , help='''Epsilon for Adam optimizer.''')
train_parser.set_defaults(func=_lowercase)
def __init__( self :Union[str, Any] , _lowercase :Namespace) -> Union[str, Any]:
UpperCAmelCase_ = logging.get_logger('''transformers-cli/training''')
UpperCAmelCase_ = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_lowercase)
UpperCAmelCase_ = args.output
UpperCAmelCase_ = args.column_label
UpperCAmelCase_ = args.column_text
UpperCAmelCase_ = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
UpperCAmelCase_ = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = args.validation_split
UpperCAmelCase_ = args.train_batch_size
UpperCAmelCase_ = args.valid_batch_size
UpperCAmelCase_ = args.learning_rate
UpperCAmelCase_ = args.adam_epsilon
def __a ( self :int) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :Optional[Any]) -> Any:
raise NotImplementedError
def __a ( self :int) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
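        # Illustrative invocation (hypothetical paths; flags as registered above):
        #   transformers-cli train --train_data ./train.csv --task text_classification \
        #       --model bert-base-uncased --output ./trained_model --train_batch_size 32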
| 344 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Optional[Any] , _lowercase :Any , _lowercase :List[str]=7 , _lowercase :int=3 , _lowercase :List[Any]=30 , _lowercase :Union[str, Any]=400 , _lowercase :str=True , _lowercase :Tuple=None , _lowercase :Union[str, Any]=True , _lowercase :str=[0.5, 0.5, 0.5] , _lowercase :int=[0.5, 0.5, 0.5] , _lowercase :Tuple=True , _lowercase :int=1 / 255 , _lowercase :Tuple=True , ) -> Union[str, Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
def __a ( self :Any) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __a ( self :List[Any] , _lowercase :List[str] , _lowercase :Any=False) -> Any:
if not batched:
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ = int(self.size['''shortest_edge'''] * h / w)
UpperCAmelCase_ = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = int(self.size['''shortest_edge'''] * w / h)
else:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = self.size['''shortest_edge''']
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
            UpperCAmelCase_ = max(_lowercase , key=lambda item: item[0])[0]
            UpperCAmelCase_ = max(_lowercase , key=lambda item: item[1])[1]
return expected_height, expected_width
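    # Worked example (illustrative): for a 300x400 (h x w) input with
    # size {"shortest_edge": 18}, w > h gives expected_height = 18 and
    # expected_width = int(18 * 400 / 300) = 24.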
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Any =DetaImageProcessor if is_vision_available() else None
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = DetaImageProcessingTester(self)
@property
def __a ( self :Tuple) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''do_rescale'''))
self.assertTrue(hasattr(_lowercase , '''do_pad'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
def __a ( self :str) -> Optional[Any]:
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333})
self.assertEqual(image_processor.do_pad , _lowercase)
def __a ( self :Any) -> List[Any]:
pass
def __a ( self :List[str]) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __a ( self :Optional[Any]) -> List[str]:
# prepare image and target
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
UpperCAmelCase_ = json.loads(f.read())
UpperCAmelCase_ = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCAmelCase_ = DetaImageProcessor()
UpperCAmelCase_ = image_processing(images=_lowercase , annotations=_lowercase , return_tensors='''pt''')
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['''pixel_values'''].shape , _lowercase)
UpperCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowercase , atol=1E-4))
# verify area
UpperCAmelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowercase))
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowercase)
UpperCAmelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowercase , atol=1E-3))
# verify image_id
UpperCAmelCase_ = torch.tensor([39769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowercase))
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowercase))
# verify class_labels
UpperCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowercase))
# verify orig_size
UpperCAmelCase_ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowercase))
# verify size
UpperCAmelCase_ = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowercase))
@slow
def __a ( self :Optional[Any]) -> str:
# prepare image, target and masks_path
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
UpperCAmelCase_ = json.loads(f.read())
UpperCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
UpperCAmelCase_ = DetaImageProcessor(format='''coco_panoptic''')
UpperCAmelCase_ = image_processing(images=_lowercase , annotations=_lowercase , masks_path=_lowercase , return_tensors='''pt''')
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['''pixel_values'''].shape , _lowercase)
UpperCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowercase , atol=1E-4))
# verify area
UpperCAmelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowercase))
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowercase)
UpperCAmelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowercase , atol=1E-3))
# verify image_id
UpperCAmelCase_ = torch.tensor([39769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowercase))
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowercase))
# verify class_labels
UpperCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowercase))
# verify masks
UpperCAmelCase_ = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowercase)
# verify orig_size
UpperCAmelCase_ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowercase))
# verify size
UpperCAmelCase_ = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowercase))
| 344 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def __a ( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self :List[Any] , _lowercase :Tuple , _lowercase :List[str]=False) -> int:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(_lowercase , _lowercase)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(_lowercase , _lowercase) > max_size:
UpperCAmelCase_ = max_size / max(_lowercase , _lowercase)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
            UpperCAmelCase_ = max(_lowercase , key=lambda item: item[0])[0]
            UpperCAmelCase_ = max(_lowercase , key=lambda item: item[1])[1]
return expected_height, expected_width
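    # Worked example (illustrative): a 480x640 (h x w) input with
    # {"shortest_edge": 288} and size_divisor 32 scales by 288 / 480 = 0.6 to
    # 288x384; max(288, 384) stays below int(1333 / 800 * 288) = 479 and both
    # sides are already multiples of 32, so the expected size is 288x384.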
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class a_ :
UpperCamelCase__ : CommonSchedulerState
# setable values
UpperCamelCase__ : jnp.ndarray
UpperCamelCase__ : jnp.ndarray
UpperCamelCase__ : Optional[int] =None
@classmethod
def __a ( cls :Union[str, Any] , _lowercase :CommonSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray) -> Any:
return cls(common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase)
@dataclass
class a_ ( _snake_case ):
UpperCamelCase__ : DDPMSchedulerState
class a_ ( _snake_case , _snake_case ):
UpperCamelCase__ : Union[str, Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
UpperCamelCase__ : jnp.dtype
@property
def __a ( self :List[Any]) -> int:
return True
@register_to_config
    def __init__( self :Any , _lowercase :int = 1000 , _lowercase :float = 0.0_001 , _lowercase :float = 0.02 , _lowercase :str = "linear" , _lowercase :Optional[jnp.ndarray] = None , _lowercase :str = "fixed_small" , _lowercase :bool = True , _lowercase :str = "epsilon" , _lowercase :jnp.dtype = jnp.float32 , ) -> str:
UpperCAmelCase_ = dtype
def __a ( self :int , _lowercase :Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
if common is None:
UpperCAmelCase_ = CommonSchedulerState.create(self)
# standard deviation of the initial noise distribution
UpperCAmelCase_ = jnp.array(1.0 , dtype=self.dtype)
UpperCAmelCase_ = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1]
return DDPMSchedulerState.create(
common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase , )
def __a ( self :Optional[int] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :Optional[int] = None) -> jnp.ndarray:
return sample
def __a ( self :Union[str, Any] , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :Tuple = ()) -> DDPMSchedulerState:
UpperCAmelCase_ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (jnp.arange(0 , _lowercase) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowercase , timesteps=_lowercase , )
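    # Example (illustrative): with num_train_timesteps = 1000 and 10 inference
    # steps, step_ratio = 100 and the schedule becomes [900, 800, ..., 100, 0].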
def __a ( self :Union[str, Any] , _lowercase :DDPMSchedulerState , _lowercase :List[Any] , _lowercase :Optional[int]=None , _lowercase :List[Any]=None) -> Union[str, Any]:
UpperCAmelCase_ = state.common.alphas_cumprod[t]
UpperCAmelCase_ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase_ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
            UpperCAmelCase_ = jnp.clip(_lowercase , a_min=1E-20)
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
            UpperCAmelCase_ = jnp.log(jnp.clip(_lowercase , a_min=1E-20))
elif variance_type == "fixed_large":
UpperCAmelCase_ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase_ = jnp.log(state.common.betas[t])
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase_ = variance
UpperCAmelCase_ = state.common.betas[t]
UpperCAmelCase_ = (predicted_variance + 1) / 2
UpperCAmelCase_ = frac * max_log + (1 - frac) * min_log
return variance
def __a ( self :Union[str, Any] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :int , _lowercase :jnp.ndarray , _lowercase :Optional[jax.random.KeyArray] = None , _lowercase :bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase_ = timestep
if key is None:
UpperCAmelCase_ = jax.random.PRNGKey(0)
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase_ , UpperCAmelCase_ = jnp.split(_lowercase , sample.shape[1] , axis=1)
else:
UpperCAmelCase_ = None
# 1. compute alphas, betas
UpperCAmelCase_ = state.common.alphas_cumprod[t]
UpperCAmelCase_ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
UpperCAmelCase_ = 1 - alpha_prod_t
UpperCAmelCase_ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase_ = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
''' for the FlaxDDPMScheduler.''')
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase_ = jnp.clip(_lowercase , -1 , 1)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase_ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase_ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
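        # i.e. Eq. (7) of Ho et al. 2020:
        #   mu_t(x_t, x_0) = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
        #                  + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t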
# 6. Add noise
def random_variance():
UpperCAmelCase_ = jax.random.split(_lowercase , num=1)
UpperCAmelCase_ = jax.random.normal(_lowercase , shape=model_output.shape , dtype=self.dtype)
return (self._get_variance(_lowercase , _lowercase , predicted_variance=_lowercase) ** 0.5) * noise
UpperCAmelCase_ = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype))
UpperCAmelCase_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowercase , state=_lowercase)
def __a ( self :Optional[Any] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ) -> jnp.ndarray:
return add_noise_common(state.common , _lowercase , _lowercase , _lowercase)
def __a ( self :str , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ) -> jnp.ndarray:
return get_velocity_common(state.common , _lowercase , _lowercase , _lowercase)
def __len__( self :Tuple) -> Union[str, Any]:
return self.config.num_train_timesteps
| 344 |
def solution( limit = 1_000_000 ) -> int:
    '''simple docstring'''
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
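# Sanity check (illustrative): phi is seeded with phi[i] = i - 1, so i is prime
# exactly when phi[i] is still untouched at iteration i; each multiple j then
# loses a 1/i fraction via phi[j] -= phi[j] // i. For example phi(6):
# 5 -> 3 (i = 2) -> 2 (i = 3), matching Euler's totient phi(6) = 2.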
if __name__ == "__main__":
print(solution())
| 344 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
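    # Note (illustrative): with sys.modules[__name__] swapped for a _LazyModule,
    # the heavy backends (torch / tf / flax) are only imported on first attribute
    # access, so `from transformers.models.opt import OPTConfig` stays cheap.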
| 344 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =(PNDMScheduler,)
UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),)
def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :int , **_lowercase :str) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
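        # Note (illustrative): with 10 steps over 1000 train timesteps and
        # steps_offset = 1, the base schedule is [901, 801, ..., 101, 1]; the
        # duplicated early entries above come from the Runge-Kutta (PRK) warmup
        # before the scheduler switches to plain PLMS steps.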
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify different beta, so that the first alpha is 0.99
        UpperCAmelCase_ = self.full_loop(set_alpha_to_one=True , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify different beta, so that the first alpha is 0.99
        UpperCAmelCase_ = self.full_loop(set_alpha_to_one=False , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
| 344 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class a_ ( _snake_case ):
pass
def gen( shards ) -> Tuple:
    '''simple docstring'''
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def A ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = int(os.environ['''RANK'''] )
UpperCAmelCase_ = int(os.environ['''WORLD_SIZE'''] )
UpperCAmelCase_ = ArgumentParser()
    parser.add_argument('''--streaming''' , type=bool )
    parser.add_argument('''--local_rank''' , type=int )
    parser.add_argument('''--num_workers''' , type=int , default=0 )
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = args.streaming
UpperCAmelCase_ = args.num_workers
UpperCAmelCase_ = {'''shards''': [f"shard_{shard_idx}" for shard_idx in range(__UpperCAmelCase )]}
UpperCAmelCase_ = IterableDataset.from_generator(__UpperCAmelCase , gen_kwargs=__UpperCAmelCase )
if not streaming:
UpperCAmelCase_ = Dataset.from_list(list(__UpperCAmelCase ) )
UpperCAmelCase_ = split_dataset_by_node(__UpperCAmelCase , rank=__UpperCAmelCase , world_size=__UpperCAmelCase )
UpperCAmelCase_ = torch.utils.data.DataLoader(__UpperCAmelCase , num_workers=__UpperCAmelCase )
UpperCAmelCase_ = NUM_SHARDS * NUM_ITEMS_PER_SHARD
UpperCAmelCase_ = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
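    # Worked example (illustrative): full_size = 4 * 3 = 12; with world_size = 5,
    # 12 // 5 = 2 and 12 % 5 = 2, so ranks 0-1 expect 3 rows and ranks 2-4
    # expect 2, which sums back to 12.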
UpperCAmelCase_ = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
| 344 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class a_ :
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="dict"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Audio" , init=_snake_case , repr=_snake_case )
def __call__( self :List[Any]) -> List[Any]:
return self.pa_type
def __a ( self :Any , _lowercase :Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(_lowercase , _lowercase):
return {"bytes": None, "path": value}
elif isinstance(_lowercase , _lowercase):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(_lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    UpperCAmelCase_ = np.frombuffer(value['''bytes'''] , dtype=np.int16).astype(np.float32) / 32767
else:
                    UpperCAmelCase_ = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.float32) / 32767
UpperCAmelCase_ = BytesIO(bytes())
sf.write(_lowercase , _lowercase , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self :Dict , _lowercase :dict , _lowercase :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
UpperCAmelCase_ , UpperCAmelCase_ = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
UpperCAmelCase_ = xsplitext(_lowercase)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split('''::''')[-1]
try:
UpperCAmelCase_ = string_to_dict(_lowercase , config.HUB_DATASETS_URL)['''repo_id''']
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(_lowercase , '''rb''' , use_auth_token=_lowercase) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(_lowercase)
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(_lowercase , orig_sr=_lowercase , target_sr=self.sampling_rate)
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def __a ( self :int , _lowercase :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
            UpperCAmelCase_ = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(_lowercase , self.pa_type)
def __a ( self :Any , _lowercase :pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(_lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(_lowercase , self.pa_type)
| 344 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> Dict:
UpperCAmelCase_ = get_activation('''swish''')
self.assertIsInstance(_lowercase , nn.SiLU)
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
def __a ( self :List[Any]) -> List[Any]:
UpperCAmelCase_ = get_activation('''silu''')
self.assertIsInstance(_lowercase , nn.SiLU)
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
def __a ( self :str) -> Any:
UpperCAmelCase_ = get_activation('''mish''')
self.assertIsInstance(_lowercase , nn.Mish)
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = get_activation('''gelu''')
self.assertIsInstance(_lowercase , nn.GELU)
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
| 344 |
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=_snake_case ):
UpperCamelCase__ : Any =["torch", "scipy"]
def __init__( self :List[str] , *_lowercase :List[str] , **_lowercase :Union[str, Any]) -> List[Any]:
requires_backends(self , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Dict , *_lowercase :Any , **_lowercase :Dict) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Optional[Any] , *_lowercase :str , **_lowercase :Optional[Any]) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
| 344 | 1 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1_408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1_536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1_792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2_304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2_560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def A ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = EfficientNetConfig()
UpperCAmelCase_ = CONFIG_MAP[model_name]['''hidden_dim''']
UpperCAmelCase_ = CONFIG_MAP[model_name]['''width_coef''']
UpperCAmelCase_ = CONFIG_MAP[model_name]['''depth_coef''']
UpperCAmelCase_ = CONFIG_MAP[model_name]['''image_size''']
UpperCAmelCase_ = CONFIG_MAP[model_name]['''dropout_rate''']
UpperCAmelCase_ = CONFIG_MAP[model_name]['''dw_padding''']
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ = 1000
UpperCAmelCase_ = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
    UpperCAmelCase_ = {int(k): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
return config
def A ( ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase_ = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
def A ( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = CONFIG_MAP[model_name]['''image_size''']
UpperCAmelCase_ = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=False , )
return preprocessor
def A ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
UpperCAmelCase_ = sorted(set(__UpperCAmelCase ) )
UpperCAmelCase_ = len(__UpperCAmelCase )
UpperCAmelCase_ = {b: str(__UpperCAmelCase ) for b, i in zip(__UpperCAmelCase , range(__UpperCAmelCase ) )}
UpperCAmelCase_ = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
UpperCAmelCase_ = block_name_mapping[b]
rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    # Classifier head: the TF top dense layer is named "predictions"
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    '''Copy TF weights into the HF state dict, converting tensor layouts where needed.'''
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # Conv kernels: TF HWIO -> PyTorch OIHW
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            # Depthwise kernels: TF (H, W, in, multiplier) -> PyTorch (in, multiplier, H, W)
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            # Dense kernels are simply transposed
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    '''Convert an original TF EfficientNet checkpoint to HF, verify the logits, and optionally save/push.'''
    original_model = model_classes[model_name](
        include_top=True,
        weights='''imagenet''',
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation='''softmax''',
    )
    # Collect every TF variable (trainable and non-trainable) into a flat name -> ndarray dict
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''')
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors='''pt''')
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print('''Model outputs match!''')
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
UpperCamelCase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 344 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    '''Construct denser atom positions (14 dimensions instead of 37).'''
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein['''aatype'''].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein['''aatype'''].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein['''aatype'''].device, )
    protein_aatype = protein['''aatype'''].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein['''atom14_atom_exists'''] = residx_atom14_mask
    protein['''residx_atom14_to_atom37'''] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein['''residx_atom37_to_atom14'''] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein['''aatype'''].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein['''atom37_atom_exists'''] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    '''Numpy wrapper around make_atom14_masks.'''
    batch = tree_map(lambda n: torch.tensor(n, device=batch['''aatype'''].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 344 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
UpperCamelCase_ = r"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class StoppingCriteria(ABC):
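    # Abstract base class; subclasses return True from __call__ when generation should stop.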
@add_start_docstrings(_lowercase)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError('''StoppingCriteria needs to be subclassed''')
class MaxLengthCriteria(StoppingCriteria):
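    # Stops once the sequence reaches max_length tokens, warning if max_position_embeddings is exceeded.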
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings
@add_start_docstrings(_lowercase)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
'''exceptions, performance degradation, or nothing at all.''')
return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            '''The class `MaxNewTokensCriteria` is deprecated. '''
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            '''with `max_length = start_length + max_new_tokens` instead.''',
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens
@add_start_docstrings(_lowercase)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
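    # Stops generation once max_time seconds have elapsed since initial_timestamp.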
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(_lowercase)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
@add_start_docstrings(_lowercase)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)
    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    '''Warn on conflicting max_length settings and append a MaxLengthCriteria if none is set.'''
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''', UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
| 344 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]
    def encode(self, audio):
        return self.pre_processor(audio, return_tensors='''pt''').input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 344 | 1 |
import os
from pathlib import Path
def load_cuda_kernels():
    '''JIT-compile and load the MultiScaleDeformableAttention CPU/CUDA kernels.'''
from torch.utils.cpp_extension import load
    root = Path(__file__).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
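    # JIT-compile the CPU/CUDA sources via torch's cpp_extension loader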
    load(
        '''MultiScaleDeformableAttention''', src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=['''-DWITH_CUDA=1'''], extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 344 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
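# Lazy import structure: framework-specific submodules are only imported on first attribute access.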
UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 | 1 |
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    '''Calculate per-process waiting time under preemptive shortest-job-first (SRTF) scheduling.'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        # At each time unit, pick the arrived process with the least remaining time
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            waiting_time[short] = finish_time - arrival_time[short] - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    '''Turnaround time = burst time + waiting time.'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    '''Print the average waiting and turnaround times.'''
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print(f"Average turn around time = {total_turn_around_time / no_of_processes:.5f}")
if __name__ == "__main__":
print("Enter how many process you want to analyze")
UpperCamelCase_ = int(input())
UpperCamelCase_ = [0] * no_of_processes
UpperCamelCase_ = [0] * no_of_processes
UpperCamelCase_ = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
UpperCamelCase_ , UpperCamelCase_ = map(int, input().split())
UpperCamelCase_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
UpperCamelCase_ = burst_time
UpperCamelCase_ = no_of_processes
UpperCamelCase_ = waiting_time
UpperCamelCase_ = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
UpperCamelCase_ = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
| 344 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # Build a tokenizer from the small fixture SentencePiece model and save it for reuse
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<unk>''')
        self.assertEqual(vocab_keys[1], '''<s>''')
        self.assertEqual(vocab_keys[-1], '''[MASK]''')
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ''' '''.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='''pt''', return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence], return_tensors='''pt''', return_token_type_ids=False)
        config = BigBirdConfig(attention_type='''original_full''')
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
        decoded_text = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
        self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 344 | 1 |
from __future__ import annotations
from random import choice
def random_pivot(lst):
    '''Choose a random pivot element from the list.'''
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    '''Return the k-th smallest element (1-indexed) in expected O(n) time via quickselect.'''
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
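# Illustrative example (hypothetical input): kth_number([3, 1, 4, 5, 9, 2, 6], 3) == 3.
# Note the partition keeps only elements strictly smaller/larger than the pivot,
# so the input is assumed to contain distinct values.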
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    '''XOR-decrypt ciphertext with a cycled key; return None on any invalid character.'''
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    '''Try every three-letter lowercase key and keep decryptions made of valid characters.'''
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    '''Keep only candidate decryptions containing the given common English word.'''
    return [possible for possible in possibles if common_word in possible.lower()]
def A ( __UpperCAmelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = Path(__UpperCAmelCase ).parent.joinpath(__UpperCAmelCase ).read_text(encoding='''utf-8''' )
UpperCAmelCase_ = [int(__UpperCAmelCase ) for number in data.strip().split(''',''' )]
UpperCAmelCase_ = filter_valid_chars(__UpperCAmelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase_ = filter_common_word(__UpperCAmelCase , __UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
break
UpperCAmelCase_ = possibles[0]
return sum(ord(__UpperCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
| 344 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
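# Torch-dependent model classes are registered lazily below, only when torch is available.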
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / '''datasets''' / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, '''w''') as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 344 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
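    # Builds a tiny ESM config plus random inputs so the model tests run quickly on CPU.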
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip('''Protein models do not support embedding resizing.''')
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip('''Protein models do not support embedding resizing.''')
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
# compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2))
@slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
# compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
| 344 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"
    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled kwarg key is kept for backward compatibility with older checkpoints
        self.use_memory_efficient_attention = kwargs.pop(
            '''use_memorry_efficient_attention''', use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
def __a ( self :int) -> str:
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('''type''', None)
        rope_scaling_factor = self.rope_scaling.get('''factor''', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 344 | 1 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    '''Recursive fast modular exponentiation: O(log exponent) multiplications.'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    '''Return the last `digits` digits of the hyperexponentiation tower base^^height (Project Euler 188).'''
    # Evaluate the tower iteratively: result = base**result (mod 10**digits)
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f"{solution() = }")
| 344 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_lowercase)
return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_574) < 1E-3
    def test_timesteps_schedule(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_791) < 1E-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_791) < 1E-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type='''dpmsolver++''', solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float('''inf'''))
        self.check_over_configs(lambda_min_clipped=-5.1)
    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type='''learned_range''')
    def test_timesteps_forward(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_791) < 1E-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_248) < 1E-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='''v_prediction''')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1_453) < 1E-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type='''v_prediction''', use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0_649) < 1E-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
| 344 | 1 |
from math import factorial
def solution(num: int = 100) -> int:
    '''Return the sum of the digits of num! (Project Euler 20).'''
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 344 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    # Shifts the first positional input by +1 before the wrapped forward runs
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    # Shifts the module output by +1 after the wrapped forward runs
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
def __a ( self :str) -> Optional[int]:
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, '''_old_forward'''))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, '''forward''')
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ['''x'''])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, '''_hf_hook'''))
        self.assertFalse(hasattr(test_model, '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, '''_old_forward'''))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, '''forward''')
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ['''x'''])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, '''_hf_hook'''))
        self.assertFalse(hasattr(test_model, '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
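# A minimal sketch of the mechanism these tests exercise: a hook object that
# wraps a module's forward so it can rewrite inputs before the call and the
# output after it. This is an illustration of the pattern only, not
# accelerate's actual add_hook_to_module implementation.
import torch
import torch.nn as nn
class AddOneToInputHook:
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
    def post_forward(self, module, output):
        return output
def add_simple_hook(module: nn.Module, hook) -> None:
    old_forward = module.forward
    def new_forward(*args, **kwargs):
        args, kwargs = hook.pre_forward(module, *args, **kwargs)
        return hook.post_forward(module, old_forward(*args, **kwargs))
    module.forward = new_forward   # instance attribute shadows the class method
layer = nn.Linear(3, 3)
x = torch.randn(2, 3)
expected = layer(x + 1)
add_simple_hook(layer, AddOneToInputHook())
assert torch.allclose(layer(x), expected, atol=1e-5)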
| 344 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Dict =KandinskyVaaControlnetImgaImgPipeline
UpperCamelCase__ : Union[str, Any] =["image_embeds", "negative_image_embeds", "image", "hint"]
UpperCamelCase__ : Optional[Any] =["image_embeds", "negative_image_embeds", "image", "hint"]
UpperCamelCase__ : Optional[Any] =[
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCamelCase__ : Dict =False
@property
def __a ( self :Union[str, Any]) -> Optional[int]:
return 32
@property
def __a ( self :Tuple) -> List[str]:
return 32
@property
def __a ( self :Tuple) -> Tuple:
return self.time_input_dim
@property
def __a ( self :List[str]) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def __a ( self :Optional[int]) -> Tuple:
return 100
@property
def __a ( self :Tuple) -> Any:
torch.manual_seed(0)
UpperCAmelCase_ = {
'''in_channels''': 8,
# out_channels is double in_channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase_ = UNetaDConditionModel(**_lowercase)
return model
@property
def __a ( self :Union[str, Any]) -> Tuple:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __a ( self :int) -> List[Any]:
torch.manual_seed(0)
UpperCAmelCase_ = VQModel(**self.dummy_movq_kwargs)
return model
def __a ( self :int) -> int:
UpperCAmelCase_ = self.dummy_unet
UpperCAmelCase_ = self.dummy_movq
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
UpperCAmelCase_ = DDIMScheduler(**_lowercase)
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __a ( self :Any , _lowercase :Dict , _lowercase :Dict=0) -> Optional[Any]:
UpperCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
_lowercase)
# create init_image
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_lowercase)).convert('''RGB''').resize((256, 256))
# create hint
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase)).to(_lowercase)
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __a ( self :Optional[int]) -> Tuple:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
UpperCAmelCase_ = pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = pipe(**self.get_dummy_inputs(_lowercase))
UpperCAmelCase_ = output.images
UpperCAmelCase_ = pipe(
**self.get_dummy_inputs(_lowercase) , return_dict=_lowercase , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :Tuple) -> Tuple:
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''')
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
UpperCAmelCase_ = init_image.resize((512, 512))
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''')
UpperCAmelCase_ = torch.from_numpy(np.array(_lowercase)).float() / 255.0
UpperCAmelCase_ = hint.permute(2 , 0 , 1).unsqueeze(0)
UpperCAmelCase_ = '''A robot, 4k photo'''
UpperCAmelCase_ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa)
pipe_prior.to(_lowercase)
UpperCAmelCase_ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa)
UpperCAmelCase_ = pipeline.to(_lowercase)
pipeline.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ , UpperCAmelCase_ = pipe_prior(
_lowercase , image=_lowercase , strength=0.85 , generator=_lowercase , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase_ = pipeline(
image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , hint=_lowercase , generator=_lowercase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowercase , _lowercase)
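# Hedged helper distilled from the slow test above: converting a PIL hint
# image into the (1, 3, H, W) float tensor in [0, 1] that the controlnet
# pipeline consumes. The solid-gray image is a placeholder input.
import numpy as np
import torch
from PIL import Image
def hint_image_to_tensor(image: Image.Image) -> torch.Tensor:
    array = np.array(image.convert("RGB")).astype(np.float32) / 255.0
    return torch.from_numpy(array).permute(2, 0, 1).unsqueeze(0)
_hint = hint_image_to_tensor(Image.new("RGB", (64, 64), color=(128, 128, 128)))
assert _hint.shape == (1, 3, 64, 64) and _hint.dtype == torch.float32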
| 344 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
UpperCAmelCase_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
UpperCAmelCase_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase)
processor.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , **_lowercase :Dict) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).tokenizer
def __a ( self :Optional[Any] , **_lowercase :Optional[Any]) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).image_processor
def __a ( self :Dict , **_lowercase :Tuple) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).qformer_tokenizer
def __a ( self :Optional[int]) -> str:
shutil.rmtree(self.tmpdirname)
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
self.assertIsInstance(processor.qformer_tokenizer , _lowercase)
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase)
UpperCAmelCase_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
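# A minimal sketch of the key-merging convention the processor tests check:
# the main tokenizer's keys pass through unchanged while the Q-Former
# tokenizer's keys gain a "qformer_" prefix. The toy dicts below stand in for
# real tokenizer/image-processor outputs.
def combine_outputs(text_enc: dict, qformer_enc: dict, image_enc: dict) -> dict:
    merged = dict(text_enc)
    merged.update({f"qformer_{k}": v for k, v in qformer_enc.items()})
    merged.update(image_enc)
    return merged
_out = combine_outputs(
    {"input_ids": [[1, 2]], "attention_mask": [[1, 1]]},
    {"input_ids": [[3, 4]], "attention_mask": [[1, 1]]},
    {"pixel_values": [[0.0]]},
)
assert sorted(_out) == [
    "attention_mask", "input_ids", "pixel_values",
    "qformer_attention_mask", "qformer_input_ids",
]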
| 344 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> Generator[tuple[str, ...], None, None]:
'''simple docstring'''
UpperCAmelCase_ = iter(__UpperCAmelCase )
while True:
UpperCAmelCase_ = tuple(itertools.islice(__UpperCAmelCase , __UpperCAmelCase ) )
if not chunk:
return
yield chunk
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
UpperCAmelCase_ = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
UpperCAmelCase_ = ''''''
if len(__UpperCAmelCase ) < 2:
return dirty
for i in range(len(__UpperCAmelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(__UpperCAmelCase ) & 1:
clean += "X"
return clean
def A ( __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
UpperCAmelCase_ = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
UpperCAmelCase_ = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(__UpperCAmelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(__UpperCAmelCase )
return table
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
UpperCAmelCase_ = generate_table(__UpperCAmelCase )
UpperCAmelCase_ = prepare_input(__UpperCAmelCase )
UpperCAmelCase_ = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__UpperCAmelCase , 2 ):
UpperCAmelCase_ , UpperCAmelCase_ = divmod(table.index(__UpperCAmelCase ) , 5 )
UpperCAmelCase_ , UpperCAmelCase_ = divmod(table.index(__UpperCAmelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
UpperCAmelCase_ = generate_table(__UpperCAmelCase )
UpperCAmelCase_ = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__UpperCAmelCase , 2 ):
UpperCAmelCase_ , UpperCAmelCase_ = divmod(table.index(__UpperCAmelCase ) , 5 )
UpperCAmelCase_ , UpperCAmelCase_ = divmod(table.index(__UpperCAmelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
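# Hedged behavioral check for the Playfair input-normalization rules above,
# restated with a clean name (the mangled defs all share the name A and
# overwrite one another): doubled letters get an X inserted between them and
# odd-length results are padded with a trailing X.
import string
def prepare_input_clean(dirty: str) -> str:
    dirty = "".join(c.upper() for c in dirty if c in string.ascii_letters)
    if len(dirty) < 2:
        return dirty
    clean = ""
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
assert prepare_input_clean("Hello, World!") == "HELXLOWORLDX"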
| 344 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] ="levit"
def __init__( self :List[str] , _lowercase :List[Any]=224 , _lowercase :str=3 , _lowercase :Optional[int]=3 , _lowercase :str=2 , _lowercase :List[Any]=1 , _lowercase :str=16 , _lowercase :Dict=[128, 256, 384] , _lowercase :Union[str, Any]=[4, 8, 12] , _lowercase :Tuple=[4, 4, 4] , _lowercase :Dict=[16, 16, 16] , _lowercase :Any=0 , _lowercase :Dict=[2, 2, 2] , _lowercase :Any=[2, 2, 2] , _lowercase :Tuple=0.02 , **_lowercase :Union[str, Any] , ) -> Optional[Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] =version.parse("1.11" )
@property
def __a ( self :Any) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def __a ( self :List[Any]) -> float:
return 1E-4
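# Hedged usage sketch for the config above, written against the real class
# name LevitConfig (the class is mangled to a_ in this dump). The values in
# the second call are illustrative, not a published checkpoint's settings.
from transformers import LevitConfig
default_config = LevitConfig()   # library defaults: hidden_sizes [128, 256, 384]
small_config = LevitConfig(hidden_sizes=[64, 128, 192], num_attention_heads=[2, 4, 6])
assert len(small_config.hidden_sizes) == len(small_config.num_attention_heads) == 3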
| 344 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =(PNDMScheduler,)
UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),)
def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :int , **_lowercase :str) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
# an earlier version of set_timesteps() raised an indexing error on the alphas when the number of inference steps was a power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before the power-of-3 fix this would error on the first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify a different beta schedule so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify a different beta schedule so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
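# A minimal sketch of driving PNDMScheduler outside the test harness: set the
# discrete timesteps, then run the Runge-Kutta warm-up followed by the linear
# multistep phase, the same split the full_loop helper above exercises. The
# zero residual is a stand-in for a real model prediction.
import torch
from diffusers import PNDMScheduler
scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.prk_timesteps:
    residual = torch.zeros_like(sample)
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:
    residual = torch.zeros_like(sample)
    sample = scheduler.step_plms(residual, t, sample).prev_sample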
| 344 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=None , __UpperCAmelCase="no" , __UpperCAmelCase="29500" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCAmelCase_ = True
elif "IPython" in sys.modules:
UpperCAmelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCAmelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
UpperCAmelCase_ = 8
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''TPU''' )
print(f"Launching a training on {num_processes} TPU cores." )
xmp.spawn(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*__UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__UpperCAmelCase , master_addr='''127.0.01''' , master_port=__UpperCAmelCase , mixed_precision=__UpperCAmelCase ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''MULTI_GPU''' )
print(f"Launching training on {num_processes} GPUs." )
try:
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCAmelCase_ = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*__UpperCAmelCase )
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=2 ) -> Optional[Any]:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__UpperCAmelCase , master_addr='''127.0.01''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , debug=__UpperCAmelCase )
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
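# Hedged usage sketch for the launcher above: keep the whole training loop in
# one function (the forked workers re-execute only it) and hand it to
# notebook_launcher. train_fn and its argument are placeholders.
from accelerate import Accelerator, notebook_launcher
def train_fn(mixed_precision: str = "no"):
    accelerator = Accelerator(mixed_precision=mixed_precision)  # construct only inside workers
    accelerator.print(f"process {accelerator.process_index} ready")
# In a notebook cell one would then run:
# notebook_launcher(train_fn, args=("fp16",), num_processes=2)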
| 344 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
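# A minimal sketch of the lazy-import pattern the module above implements:
# attribute access triggers the submodule import, so importing the package
# stays cheap when optional backends are missing. This is a toy stand-in,
# not transformers' actual _LazyModule.
import importlib
import types
class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr: str):
        module = importlib.import_module(self._name_to_module[attr])
        return getattr(module, attr)
lazy_json = ToyLazyModule("lazy_json", {"json": ["dumps", "loads"]})
assert lazy_json.dumps({"ok": True}) == '{"ok": true}'   # json imported only now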
| 344 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( _snake_case ):
def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any) -> int:
self.run_seqaseq_quick(distributed=_lowercase)
@require_torch_multi_gpu
def __a ( self :int) -> Any:
self.run_seqaseq_quick(distributed=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# Specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
# 2nd main() call, it botches the subsequent eval.
#
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def __a ( self :str , _lowercase :Any) -> List[str]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''])
UpperCAmelCase_ = len(re.findall(_lowercase , cl.err))
self.assertEqual(_lowercase , data['''n_matches'''])
@slow
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(_lowercase)
UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :List[str]) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowercase :str) -> Tuple[int, float]:
UpperCAmelCase_ = '''--skip_memory_metrics 0'''
UpperCAmelCase_ = self.run_trainer(
max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
UpperCAmelCase_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights
# that don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes, and the diff in optimizer memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
UpperCAmelCase_ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
_lowercase , _lowercase , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def __a ( self :Any , _lowercase :int , _lowercase :str , _lowercase :int , _lowercase :float = 3E-3 , _lowercase :str = "adafactor" , _lowercase :bool = False , _lowercase :str = None , _lowercase :int = 0 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :int = None , ) -> List[Any]:
UpperCAmelCase_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowercase)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowercase)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
UpperCAmelCase_ = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowercase)}\n ".split()
UpperCAmelCase_ = '''
--do_predict
'''.split()
UpperCAmelCase_ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase_ = get_gpu_count()
UpperCAmelCase_ = get_torch_dist_unique_port()
UpperCAmelCase_ = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
UpperCAmelCase_ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowercase , env=self.get_env())
else:
UpperCAmelCase_ = ['''run_translation.py'''] + args
with patch.object(_lowercase , '''argv''' , _lowercase):
main()
return output_dir
| 344 | 1 |
UpperCamelCase_ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 344 |
import functools
def A ( worda , wordb ) -> int:
    '''simple docstring'''
    len_worda = len(worda )
    len_wordb = len(wordb )
    @functools.cache
    def min_distance(indexa , indexb ) -> int:
        # if the first word's index overflows - delete the rest of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete the rest of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
    return min_distance(0 , 0 )
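# Quick sanity check of the memoized recursion above (illustrative, not a doctest):
# A("kitten", "sitting") returns 3 - substitute k->s, substitute e->i, insert g.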
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 1 |
from math import factorial
def solution(num: int = 100 ) -> int:
    '''simple docstring'''
    return sum(int(x ) for x in str(factorial(num ) ) )
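# Sanity check by hand (illustrative): factorial(10) = 3628800, whose digit sum is
# 3+6+2+8+8+0+0 = 27, so solution(10) == 27.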
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 344 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
UpperCamelCase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Any ="left"
def __init__( self :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=False , _lowercase :Tuple="<s>" , _lowercase :Any="</s>" , _lowercase :Dict="<unk>" , _lowercase :str="<sep>" , _lowercase :Tuple="<pad>" , _lowercase :Any="<cls>" , _lowercase :List[str]="<mask>" , _lowercase :Union[str, Any]=["<eop>", "<eod>"] , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Union[str, Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
UpperCAmelCase_ = 3
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
@property
def __a ( self :int) -> List[Any]:
return len(self.sp_model)
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Optional[Any]) -> List[Any]:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __a ( self :List[str] , _lowercase :Tuple) -> Optional[int]:
if self.remove_space:
UpperCAmelCase_ = ''' '''.join(inputs.strip().split())
else:
UpperCAmelCase_ = inputs
UpperCAmelCase_ = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCAmelCase_ = unicodedata.normalize('''NFKD''' , _lowercase)
UpperCAmelCase_ = ''''''.join([c for c in outputs if not unicodedata.combining(_lowercase)])
if self.do_lower_case:
UpperCAmelCase_ = outputs.lower()
return outputs
def __a ( self :str , _lowercase :str) -> List[str]:
UpperCAmelCase_ = self.preprocess_text(_lowercase)
UpperCAmelCase_ = self.sp_model.encode(_lowercase , out_type=_lowercase)
UpperCAmelCase_ = []
for piece in pieces:
if len(_lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCAmelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCAmelCase_ = cur_pieces[1:]
else:
UpperCAmelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowercase)
else:
new_pieces.append(_lowercase)
return new_pieces
def __a ( self :Optional[Any] , _lowercase :Union[str, Any]) -> Tuple:
return self.sp_model.PieceToId(_lowercase)
def __a ( self :Optional[int] , _lowercase :Optional[Any]) -> List[str]:
return self.sp_model.IdToPiece(_lowercase)
def __a ( self :List[Any] , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = ''''''.join(_lowercase).replace(_lowercase , ''' ''').strip()
return out_string
def __a ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :bool = False , _lowercase :bool = None , _lowercase :bool = True , **_lowercase :Tuple , ) -> str:
UpperCAmelCase_ = kwargs.pop('''use_source_tokenizer''' , _lowercase)
UpperCAmelCase_ = self.convert_ids_to_tokens(_lowercase , skip_special_tokens=_lowercase)
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
UpperCAmelCase_ = []
sub_texts.append(_lowercase)
else:
current_sub_text.append(_lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCAmelCase_ = ''''''.join(_lowercase)
UpperCAmelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ = self.clean_up_tokenization(_lowercase)
return clean_text
else:
return text
def __a ( self :str , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self :Dict , _lowercase :List[int] , _lowercase :Optional[List[int]] = None , _lowercase :bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase)
if token_ids_a is not None:
return ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) + [1, 1]
return ([0] * len(_lowercase)) + [1, 1]
def __a ( self :Optional[int] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def __a ( self :str , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_lowercase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
UpperCAmelCase_ = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
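# Minimal usage sketch for the tokenizer class above (illustrative only: "spiece.model" is a
# placeholder path to a local SentencePiece model, and the call relies on the standard
# PreTrainedTokenizer __call__ API inherited by this class):
#
#     tokenizer = a_("spiece.model")
#     ids = tokenizer("Hello, world!")["input_ids"]
#     # the encoded sequence ends with [sep_token_id, cls_token_id], XLNet-style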
| 344 | 1 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float] , x: float ) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner(poly: Sequence[float] , x: float ) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
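# Worked example of Horner's rule for the inputs below: with poly = (0.0, 0.0, 5.0, 9.3, 7.0)
# (i.e. 5x^2 + 9.3x^3 + 7x^4) and x = 10, folding the reversed coefficients gives
# ((((7*10 + 9.3)*10 + 5)*10 + 0)*10 + 0) = 79800.0, matching evaluate_poly with n multiplications.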
if __name__ == "__main__":
UpperCamelCase_ = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCamelCase_ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 344 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case , _snake_case ):
UpperCamelCase__ : Union[str, Any] ="maskformer-swin"
UpperCamelCase__ : List[str] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :Union[str, Any] , _lowercase :Optional[int]=224 , _lowercase :List[str]=4 , _lowercase :Tuple=3 , _lowercase :List[Any]=96 , _lowercase :Any=[2, 2, 6, 2] , _lowercase :int=[3, 6, 12, 24] , _lowercase :List[Any]=7 , _lowercase :Dict=4.0 , _lowercase :Any=True , _lowercase :int=0.0 , _lowercase :List[Any]=0.0 , _lowercase :Tuple=0.1 , _lowercase :str="gelu" , _lowercase :Union[str, Any]=False , _lowercase :Tuple=0.02 , _lowercase :List[str]=1E-5 , _lowercase :List[str]=None , _lowercase :Any=None , **_lowercase :Any , ) -> Union[str, Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(_lowercase) - 1))
UpperCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_lowercase) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names)
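# A minimal usage sketch (illustrative only; assumes the constructor keeps the keyword names
# it assigns from, i.e. image_size, embed_dim, depths, all shown here at their defaults):
#
#     config = a_(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
#     config.hidden_size   # 96 * 2**3 == 768, the channel dim after the last stage
#     config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']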
| 344 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : int =StableDiffusionDiffEditPipeline
UpperCamelCase__ : Union[str, Any] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
UpperCamelCase__ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
UpperCamelCase__ : List[Any] =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase__ : int =frozenset([] )
def __a ( self :List[Any]) -> Union[str, Any]:
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
UpperCAmelCase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowercase , set_alpha_to_zero=_lowercase , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(_lowercase)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __a ( self :Tuple , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=0) -> Any:
UpperCAmelCase_ = floats_tensor((1, 16, 16) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_lowercase)).to(_lowercase)
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :str , _lowercase :Optional[int] , _lowercase :int=0) -> List[Any]:
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_lowercase)).convert('''RGB''')
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :Optional[int] , _lowercase :Optional[Any] , _lowercase :List[str]=0) -> Tuple:
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_lowercase)).convert('''RGB''')
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :str) -> Optional[int]:
if not hasattr(self.pipeline_class , '''_optional_components'''):
return
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
UpperCAmelCase_ = self.get_dummy_inputs(_lowercase)
UpperCAmelCase_ = pipe(**_lowercase)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase)
UpperCAmelCase_ = self.pipeline_class.from_pretrained(_lowercase)
pipe_loaded.to(_lowercase)
pipe_loaded.set_progress_bar_config(disable=_lowercase)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase) is None , f"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase_ = self.get_dummy_inputs(_lowercase)
UpperCAmelCase_ = pipe_loaded(**_lowercase)[0]
UpperCAmelCase_ = np.abs(output - output_loaded).max()
self.assertLess(_lowercase , 1E-4)
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_mask_inputs(_lowercase)
UpperCAmelCase_ = pipe.generate_mask(**_lowercase)
UpperCAmelCase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
UpperCAmelCase_ = np.array([0] * 9)
UpperCAmelCase_ = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(_lowercase , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_inversion_inputs(_lowercase)
UpperCAmelCase_ = pipe.invert(**_lowercase).images
UpperCAmelCase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
UpperCAmelCase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(_lowercase , 1E-3)
def __a ( self :List[Any]) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def __a ( self :List[Any]) -> Union[str, Any]:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase_ = DPMSolverMultistepScheduler(**_lowercase)
UpperCAmelCase_ = DPMSolverMultistepInverseScheduler(**_lowercase)
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_inversion_inputs(_lowercase)
UpperCAmelCase_ = pipe.invert(**_lowercase).images
UpperCAmelCase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
UpperCAmelCase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(_lowercase , 1E-3)
@require_torch_gpu
@slow
class a_ ( unittest.TestCase ):
def __a ( self :Union[str, Any]) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __a ( cls :List[str]) -> Dict:
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''')
UpperCAmelCase_ = raw_image.convert('''RGB''').resize((768, 768))
UpperCAmelCase_ = raw_image
def __a ( self :str) -> int:
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowercase , torch_dtype=torch.floataa)
UpperCAmelCase_ = DDIMScheduler.from_config(pipe.scheduler.config)
UpperCAmelCase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = '''a bowl of fruit'''
UpperCAmelCase_ = '''a bowl of pears'''
UpperCAmelCase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowercase , target_prompt=_lowercase , generator=_lowercase , )
UpperCAmelCase_ = pipe.invert(
prompt=_lowercase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowercase).latents
UpperCAmelCase_ = pipe(
prompt=_lowercase , mask_image=_lowercase , image_latents=_lowercase , generator=_lowercase , negative_prompt=_lowercase , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def __a ( self :Tuple) -> Any:
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowercase , torch_dtype=torch.floataa)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = '''a bowl of fruit'''
UpperCAmelCase_ = '''a bowl of pears'''
UpperCAmelCase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowercase , target_prompt=_lowercase , generator=_lowercase , )
UpperCAmelCase_ = pipe.invert(
prompt=_lowercase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowercase , num_inference_steps=25 , ).latents
UpperCAmelCase_ = pipe(
prompt=_lowercase , mask_image=_lowercase , image_latents=_lowercase , generator=_lowercase , negative_prompt=_lowercase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
| 344 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase_ = False
UpperCamelCase_ = False
def A ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
return TrainCommand(__UpperCAmelCase )
class a_ ( _snake_case ):
@staticmethod
def __a ( _lowercase :ArgumentParser) -> List[Any]:
UpperCAmelCase_ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
train_parser.add_argument(
'''--train_data''' , type=_lowercase , required=_lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''')
train_parser.add_argument(
'''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''')
train_parser.add_argument(
'''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''')
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''')
train_parser.add_argument(
'''--validation_split''' , type=_lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path to saved the trained model.''')
train_parser.add_argument(
'''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''')
train_parser.add_argument(
'''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=32 , help='''Batch size for training.''')
train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=64 , help='''Batch size for validation.''')
train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3E-5 , help='''Learning rate.''')
train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1E-0_8 , help='''Epsilon for Adam optimizer.''')
train_parser.set_defaults(func=_lowercase)
def __init__( self :Union[str, Any] , _lowercase :Namespace) -> Union[str, Any]:
UpperCAmelCase_ = logging.get_logger('''transformers-cli/training''')
UpperCAmelCase_ = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_lowercase)
UpperCAmelCase_ = args.output
UpperCAmelCase_ = args.column_label
UpperCAmelCase_ = args.column_text
UpperCAmelCase_ = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
UpperCAmelCase_ = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = args.validation_split
UpperCAmelCase_ = args.train_batch_size
UpperCAmelCase_ = args.valid_batch_size
UpperCAmelCase_ = args.learning_rate
UpperCAmelCase_ = args.adam_epsilon
def __a ( self :int) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :Optional[Any]) -> Any:
raise NotImplementedError
def __a ( self :int) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
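# Illustrative CLI invocation for the command defined above (flag names come from
# register_subcommand; the data path and output dir are placeholders):
#
#     transformers-cli train --train_data ./train.csv --column_label 0 --column_text 1 \
#         --validation_split 0.1 --output ./trained-model --model bert-base-uncased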
| 344 | 1 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
UpperCAmelCase_ = tmp_path / '''cache'''
UpperCAmelCase_ = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = tmp_path / '''cache'''
UpperCAmelCase_ = {'''text''': '''string'''}
UpperCAmelCase_ = features.copy() if features else default_expected_features
UpperCAmelCase_ = (
Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ = TextDatasetReader(__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = tmp_path / '''cache'''
UpperCAmelCase_ = {'''text''': '''string'''}
UpperCAmelCase_ = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase , split=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
if issubclass(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_ = text_path
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_ = [text_path]
UpperCAmelCase_ = tmp_path / '''cache'''
UpperCAmelCase_ = {'''text''': '''string'''}
UpperCAmelCase_ = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=("train",) ) -> str:
'''simple docstring'''
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
for split in splits:
UpperCAmelCase_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = tmp_path / '''cache'''
UpperCAmelCase_ = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ = TextDatasetReader({'''train''': text_path} , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read()
_check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = tmp_path / '''cache'''
# the text loader reads every line into a single "text" column of dtype "string"
UpperCAmelCase_ = {'''text''': '''string'''}
UpperCAmelCase_ = features.copy() if features else default_expected_features
UpperCAmelCase_ = (
Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ = TextDatasetReader({'''train''': text_path} , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if split:
UpperCAmelCase_ = {split: text_path}
else:
UpperCAmelCase_ = '''train'''
UpperCAmelCase_ = {'''train''': text_path, '''test''': text_path}
UpperCAmelCase_ = tmp_path / '''cache'''
UpperCAmelCase_ = {'''text''': '''string'''}
UpperCAmelCase_ = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 344 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def __a ( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self :List[Any] , _lowercase :Tuple , _lowercase :List[str]=False) -> int:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(_lowercase , _lowercase)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(_lowercase , _lowercase) > max_size:
UpperCAmelCase_ = max_size / max(_lowercase , _lowercase)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[0])[0]
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[1])[1]
return expected_height, expected_width
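# Worked example of the resize rule above (plain numbers, illustrative): for a 300x400
# image with shortest_edge=288, scale = 288/300, so (h, w) -> (288, 384); the cap of
# int(1333/800 * 288) = 479 is not hit, and size_divisor=32 keeps (288, 384) unchanged.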
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = "▁"
UpperCamelCase_ = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
UpperCamelCase_ = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
UpperCamelCase_ = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
UpperCamelCase_ = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class a_ ( _snake_case ):
UpperCamelCase__ : str =VOCAB_FILES_NAMES
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Optional[int] =["input_ids", "attention_mask"]
UpperCamelCase__ : List[int] =[]
UpperCamelCase__ : List[int] =[]
def __init__( self :Optional[Any] , _lowercase :Dict , _lowercase :List[str] , _lowercase :Dict=None , _lowercase :Union[str, Any]=None , _lowercase :Union[str, Any]="<s>" , _lowercase :Any="</s>" , _lowercase :List[Any]="</s>" , _lowercase :List[str]="<pad>" , _lowercase :Union[str, Any]="<unk>" , _lowercase :Tuple="m2m100" , _lowercase :Optional[Dict[str, Any]] = None , _lowercase :str=8 , **_lowercase :Any , ) -> None:
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ = language_codes
UpperCAmelCase_ = FAIRSEQ_LANGUAGE_CODES[language_codes]
UpperCAmelCase_ = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
UpperCAmelCase_ = kwargs.get('''additional_special_tokens''' , [])
kwargs["additional_special_tokens"] += [
self.get_lang_token(_lowercase)
for lang_code in fairseq_language_code
if self.get_lang_token(_lowercase) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_lowercase , tgt_lang=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , language_codes=_lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_lowercase , **_lowercase , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = load_json(_lowercase)
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = spm_file
UpperCAmelCase_ = load_spm(_lowercase , self.sp_model_kwargs)
UpperCAmelCase_ = len(self.encoder)
UpperCAmelCase_ = {
self.get_lang_token(_lowercase): self.encoder_size + i for i, lang_code in enumerate(_lowercase)
}
UpperCAmelCase_ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_lowercase)}
UpperCAmelCase_ = {v: k for k, v in self.lang_token_to_id.items()}
UpperCAmelCase_ = src_lang if src_lang is not None else '''en'''
UpperCAmelCase_ = tgt_lang
UpperCAmelCase_ = self.get_lang_id(self._src_lang)
self.set_src_lang_special_tokens(self._src_lang)
UpperCAmelCase_ = num_madeup_words
@property
def __a ( self :Optional[Any]) -> int:
return len(self.encoder) + len(self.lang_token_to_id)
@property
def __a ( self :int) -> str:
return self._src_lang
@src_lang.setter
def __a ( self :Dict , _lowercase :str) -> None:
UpperCAmelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def __a ( self :Optional[Any] , _lowercase :str) -> List[str]:
return self.sp_model.encode(_lowercase , out_type=_lowercase)
def __a ( self :Optional[int] , _lowercase :Tuple) -> List[str]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(_lowercase , self.encoder[self.unk_token])
def __a ( self :int , _lowercase :int) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(_lowercase , self.unk_token)
def __a ( self :str , tokens :Any) -> Any:
UpperCAmelCase_ = []
UpperCAmelCase_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(current_sub_tokens) + token
UpperCAmelCase_ = []
else:
current_sub_tokens.append(token)
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
def __a ( self :Dict , token_ids_0 :List[int] , token_ids_1 :Optional[List[int]] = None , already_has_special_tokens :bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
UpperCAmelCase_ = [1] * len(self.prefix_tokens)
UpperCAmelCase_ = [1] * len(self.suffix_tokens)
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def __a ( self :List[Any] , token_ids_0 :List[int] , token_ids_1 :Optional[List[int]] = None) -> List[int]:
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :List[str]) -> Dict:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :str , _lowercase :Dict) -> None:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = load_spm(self.spm_file , self.sp_model_kwargs)
def __a ( self :Tuple , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
UpperCAmelCase_ = Path(_lowercase)
if not save_dir.is_dir():
raise OSError(f"{save_directory} should be a directory")
UpperCAmelCase_ = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
UpperCAmelCase_ = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _lowercase)
if os.path.abspath(self.spm_file) != os.path.abspath(_lowercase) and os.path.isfile(self.spm_file):
copyfile(self.spm_file , _lowercase)
elif not os.path.isfile(self.spm_file):
with open(_lowercase , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (str(_lowercase), str(_lowercase))
def __a ( self :Union[str, Any] , _lowercase :List[str] , _lowercase :str = "en" , _lowercase :Optional[List[str]] = None , _lowercase :str = "ro" , **_lowercase :Tuple , ) -> BatchEncoding:
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = tgt_lang
self.set_src_lang_special_tokens(self.src_lang)
return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase)
def __a ( self :int , _lowercase :Dict , _lowercase :Optional[str] , _lowercase :Optional[str] , **_lowercase :List[str]) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = self(_lowercase , add_special_tokens=_lowercase , **_lowercase)
UpperCAmelCase_ = self.get_lang_id(_lowercase)
UpperCAmelCase_ = tgt_lang_id
return inputs
def __a ( self :Optional[Any]) -> Optional[int]:
self.set_src_lang_special_tokens(self.src_lang)
def __a ( self :List[str]) -> Tuple:
self.set_tgt_lang_special_tokens(self.tgt_lang)
def __a ( self :Optional[Any] , _lowercase :str) -> None:
UpperCAmelCase_ = self.get_lang_token(_lowercase)
UpperCAmelCase_ = self.lang_token_to_id[lang_token]
UpperCAmelCase_ = [self.cur_lang_id]
UpperCAmelCase_ = [self.eos_token_id]
def __a ( self :str , _lowercase :str) -> None:
UpperCAmelCase_ = self.get_lang_token(_lowercase)
UpperCAmelCase_ = self.lang_token_to_id[lang_token]
UpperCAmelCase_ = [self.cur_lang_id]
UpperCAmelCase_ = [self.eos_token_id]
def __a ( self :Dict , _lowercase :str) -> str:
return self.lang_code_to_token[lang]
def __a ( self :Union[str, Any] , _lowercase :str) -> int:
UpperCAmelCase_ = self.get_lang_token(_lowercase)
return self.lang_token_to_id[lang_token]
def A ( path , sp_model_kwargs ) -> sentencepiece.SentencePieceProcessor:
'''Load a SentencePiece model from `path`, forwarding any processor kwargs.'''
UpperCAmelCase_ = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
spm.Load(str(path ) )
return spm
def A ( path ) -> Union[Dict, List]:
'''Read a JSON file and return the parsed object.'''
with open(path , '''r''' ) as f:
return json.load(f )
def A ( data , path ) -> None:
'''Serialize `data` to `path` as indented JSON.'''
with open(path , '''w''' ) as f:
json.dump(data , f , indent=2 )
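# Minimal usage sketch (a hedged example: assumes local "vocab.json" / "sentencepiece.bpe.model"
# files and that the class above is instantiated under the name M2M100Tokenizer; both names are
# assumptions for illustration, not confirmed by this file):
#
# tok = M2M100Tokenizer("vocab.json", "sentencepiece.bpe.model", src_lang="en", tgt_lang="ro")
# ids = tok("Hello world")["input_ids"]  # [__en__ id, subword ids..., eos id]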
| 344 |
def A ( limit = 100_0000 ) -> int:
'''Project Euler 72: count reduced proper fractions n/d with d <= limit, i.e. sum phi(2..limit).'''
UpperCAmelCase_ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1: # untouched so far, hence i is prime
for j in range(2 * i , limit + 1 , i ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
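# Example: solution(8) counts the reduced proper fractions with denominator <= 8:
# phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21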
if __name__ == "__main__":
print(solution())
| 344 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class a_ ( _snake_case ):
def __a ( self :int) -> int:
UpperCAmelCase_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(_lowercase , '''hidden_sizes'''))
self.parent.assertTrue(hasattr(_lowercase , '''num_attention_heads'''))
self.parent.assertTrue(hasattr(_lowercase , '''num_encoder_blocks'''))
class a_ :
def __init__( self :Optional[Any] , _lowercase :List[str] , _lowercase :List[Any]=13 , _lowercase :str=64 , _lowercase :Union[str, Any]=3 , _lowercase :int=4 , _lowercase :Optional[Any]=[2, 2, 2, 2] , _lowercase :str=[8, 4, 2, 1] , _lowercase :str=[16, 32, 64, 128] , _lowercase :Optional[int]=[1, 4, 8, 16] , _lowercase :Dict=[1, 2, 4, 8] , _lowercase :List[str]=True , _lowercase :List[Any]=True , _lowercase :Optional[Any]="gelu" , _lowercase :Any=0.1 , _lowercase :Any=0.1 , _lowercase :List[Any]=0.02 , _lowercase :List[str]=3 , _lowercase :List[str]=None , ) -> List[str]:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = num_encoder_blocks
UpperCAmelCase_ = sr_ratios
UpperCAmelCase_ = depths
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = downsampling_rates
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def __a ( self :Union[str, Any]) -> int:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __a ( self :List[Any] , _lowercase :str , _lowercase :List[str] , _lowercase :Optional[Any]) -> Tuple:
UpperCAmelCase_ = SegformerModel(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase)
UpperCAmelCase_ = UpperCAmelCase_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def __a ( self :List[Any] , _lowercase :Any , _lowercase :int , _lowercase :Optional[int]) -> Dict:
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SegformerForSemanticSegmentation(_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
UpperCAmelCase_ = model(_lowercase , labels=_lowercase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def __a ( self :List[str] , _lowercase :Dict , _lowercase :Dict , _lowercase :Dict) -> Tuple:
UpperCAmelCase_ = 1
UpperCAmelCase_ = SegformerForSemanticSegmentation(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(_lowercase)
UpperCAmelCase_ = model(_lowercase , labels=_lowercase)
self.parent.assertGreater(result.loss , 0.0)
def __a ( self :Dict) -> str:
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
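# The tester above builds a tiny randomly-initialized config plus random pixel inputs so the
# model tests below run quickly on CPU.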
@require_torch
class a_ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : List[str] =(
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ : Optional[Any] =(
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : Optional[int] =True
UpperCamelCase__ : List[Any] =False
UpperCamelCase__ : List[str] =False
UpperCamelCase__ : int =False
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = SegformerModelTester(self)
UpperCAmelCase_ = SegformerConfigTester(self , config_class=_lowercase)
def __a ( self :Optional[int]) -> str:
self.config_tester.run_common_tests()
def __a ( self :int) -> int:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase)
def __a ( self :Union[str, Any]) -> Union[str, Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_lowercase)
def __a ( self :List[Any]) -> int:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_lowercase)
@unittest.skip('''SegFormer does not use inputs_embeds''')
def __a ( self :str) -> Union[str, Any]:
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''')
def __a ( self :Tuple) -> List[str]:
pass
def __a ( self :int) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase)
UpperCAmelCase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase)
def __a ( self :Dict) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_lowercase)
model.to(_lowercase)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_lowercase , _lowercase))
UpperCAmelCase_ = outputs.attentions
UpperCAmelCase_ = sum(self.model_tester.depths)
self.assertEqual(len(_lowercase) , _lowercase)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_lowercase)
model.to(_lowercase)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_lowercase , _lowercase))
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(_lowercase) , _lowercase)
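# SegFormer's efficient self-attention shrinks the key/value sequence by sr_ratio**2 in each
# block, so each attention map has shape (num_heads, seq_len, seq_len // sr_ratio**2).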
# verify the first attentions (first block, first layer)
UpperCAmelCase_ = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
UpperCAmelCase_ = (self.model_tester.image_size // 32) ** 2
UpperCAmelCase_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
UpperCAmelCase_ = len(_lowercase)
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_lowercase)
model.to(_lowercase)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_lowercase , _lowercase))
self.assertEqual(out_len + 1 , len(_lowercase))
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(_lowercase) , _lowercase)
# verify the first attentions (first block, first layer)
UpperCAmelCase_ = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __a ( self :Tuple) -> List[str]:
def check_hidden_states_output(_lowercase :List[str] , _lowercase :Optional[int] , _lowercase :Dict):
UpperCAmelCase_ = model_class(_lowercase)
model.to(_lowercase)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_lowercase , _lowercase))
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(_lowercase) , _lowercase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase)
def __a ( self :Union[str, Any]) -> List[Any]:
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(MODEL_MAPPING):
continue
UpperCAmelCase_ = model_class(_lowercase)
model.to(_lowercase)
model.train()
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
UpperCAmelCase_ = model(**_lowercase).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def __a ( self :str) -> str:
pass
@slow
def __a ( self :List[str]) -> int:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SegformerModel.from_pretrained(_lowercase)
self.assertIsNotNone(_lowercase)
def A ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class a_ ( unittest.TestCase ):
@slow
def __a ( self :Optional[int]) -> List[Any]:
# only resize + normalize
UpperCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowercase , align=_lowercase , do_random_crop=_lowercase)
UpperCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''').to(
_lowercase)
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_lowercase , return_tensors='''pt''')
UpperCAmelCase_ = encoded_inputs.pixel_values.to(_lowercase)
with torch.no_grad():
UpperCAmelCase_ = model(_lowercase)
UpperCAmelCase_ = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , _lowercase)
UpperCAmelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
]).to(_lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _lowercase , atol=1E-4))
@slow
def __a ( self :Optional[Any]) -> Tuple:
# only resize + normalize
UpperCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowercase , align=_lowercase , do_random_crop=_lowercase)
UpperCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''').to(_lowercase)
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_lowercase , return_tensors='''pt''')
UpperCAmelCase_ = encoded_inputs.pixel_values.to(_lowercase)
with torch.no_grad():
UpperCAmelCase_ = model(_lowercase)
UpperCAmelCase_ = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , _lowercase)
UpperCAmelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
]).to(_lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _lowercase , atol=1E-1))
@slow
def __a ( self :Tuple) -> str:
# only resize + normalize
UpperCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowercase , align=_lowercase , do_random_crop=_lowercase)
UpperCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''').to(
_lowercase)
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_lowercase , return_tensors='''pt''')
UpperCAmelCase_ = encoded_inputs.pixel_values.to(_lowercase)
with torch.no_grad():
UpperCAmelCase_ = model(_lowercase)
UpperCAmelCase_ = outputs.logits.detach().cpu()
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_lowercase , target_sizes=[(500, 300)])
UpperCAmelCase_ = torch.Size((500, 300))
self.assertEqual(segmentation[0].shape , _lowercase)
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_lowercase)
UpperCAmelCase_ = torch.Size((128, 128))
self.assertEqual(segmentation[0].shape , _lowercase)
| 344 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =(PNDMScheduler,)
UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),)
def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :int , **_lowercase :str) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
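# full_loop exercises both PNDM phases: the Runge-Kutta warm-up steps (step_prk) followed by
# the linear multistep phase (step_plms).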
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
| 344 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( _snake_case ):
def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any) -> int:
self.run_seqaseq_quick(distributed=_lowercase)
@require_torch_multi_gpu
def __a ( self :int) -> Any:
self.run_seqaseq_quick(distributed=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu, we will need to make a special test
#
# the problem was traced specifically to self.optimizer.step() - if it is run a second time via a
# second main() call, it botches the future eval.
#
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def __a ( self :str , _lowercase :Any) -> List[str]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''])
UpperCAmelCase_ = len(re.findall(_lowercase , cl.err))
self.assertEqual(_lowercase , data['''n_matches'''])
@slow
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(_lowercase)
UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :List[str]) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowercase :str) -> Tuple[int, float]:
UpperCAmelCase_ = '''--skip_memory_metrics 0'''
UpperCAmelCase_ = self.run_trainer(
max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
UpperCAmelCase_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
UpperCAmelCase_ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
_lowercase , _lowercase , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def __a ( self :Any , _lowercase :int , _lowercase :str , _lowercase :int , _lowercase :float = 3E-3 , _lowercase :str = "adafactor" , _lowercase :bool = False , _lowercase :str = None , _lowercase :int = 0 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :int = None , ) -> List[Any]:
UpperCAmelCase_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowercase)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowercase)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
UpperCAmelCase_ = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowercase)}\n ".split()
UpperCAmelCase_ = '''
--do_predict
'''.split()
UpperCAmelCase_ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase_ = get_gpu_count()
UpperCAmelCase_ = get_torch_dist_unique_port()
UpperCAmelCase_ = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
UpperCAmelCase_ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowercase , env=self.get_env())
else:
UpperCAmelCase_ = ['''run_translation.py'''] + args
with patch.object(_lowercase , '''argv''' , _lowercase):
main()
return output_dir
| 344 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class a_ :
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="dict"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Audio" , init=_snake_case , repr=_snake_case )
def __call__( self :List[Any]) -> List[Any]:
return self.pa_type
def __a ( self :Any , _lowercase :Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(_lowercase , _lowercase):
return {"bytes": None, "path": value}
elif isinstance(_lowercase , _lowercase):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(_lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
# Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
# If we already have the PCM bytes, we don't need to read the file again (just use them!)
UpperCAmelCase_ = np.frombuffer(value['''bytes'''] , dtype=np.int16).astype(np.float32) / 32767
else:
UpperCAmelCase_ = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.float32) / 32767
UpperCAmelCase_ = BytesIO(bytes())
sf.write(_lowercase , _lowercase , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self :Dict , _lowercase :dict , _lowercase :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
UpperCAmelCase_ , UpperCAmelCase_ = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
UpperCAmelCase_ = xsplitext(_lowercase)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split('''::''')[-1]
try:
UpperCAmelCase_ = string_to_dict(_lowercase , config.HUB_DATASETS_URL)['''repo_id''']
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(_lowercase , '''rb''' , use_auth_token=_lowercase) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(f)
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(file)
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(_lowercase)
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(_lowercase , orig_sr=_lowercase , target_sr=self.sampling_rate)
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def __a ( self :int , _lowercase :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
UpperCAmelCase_ = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(_lowercase , self.pa_type)
def __a ( self :Any , _lowercase :pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(_lowercase , self.pa_type)
| 344 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def __a ( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self :List[Any] , _lowercase :Tuple , _lowercase :List[str]=False) -> int:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
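# mirror the processor's resize: scale the shortest edge to `size`, cap the longest edge at
# 1333/800 * size, then floor both sides to a multiple of size_divisor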
UpperCAmelCase_ = size / min(h , w)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(newh , neww) > max_size:
UpperCAmelCase_ = max_size / max(newh , neww)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_lowercase , key=lambda item: item[0])[0]
UpperCAmelCase_ = max(_lowercase , key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 |
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=_snake_case ):
UpperCamelCase__ : Any =["torch", "scipy"]
def __init__( self :List[str] , *_lowercase :List[str] , **_lowercase :Union[str, Any]) -> List[Any]:
requires_backends(self , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Dict , *_lowercase :Any , **_lowercase :Dict) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Optional[Any] , *_lowercase :str , **_lowercase :Optional[Any]) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
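# Importing this placeholder without torch/scipy installed defers the failure to use time,
# raising a helpful "requires backends" error instead of an opaque ImportError.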
| 344 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
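
Pulled out of the test, a runnable sketch of the same toy tokenizer; it assumes transformers (with BioGpt) is installed and simply rebuilds the two files that setUp writes, so the expected outputs come straight from the test's assertions:

import json
import tempfile

from transformers import BioGptTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>",
         "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>"]
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

# Write the toy vocab and merges to temporary files.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as vf:
    json.dump(dict(zip(vocab, range(len(vocab)))), vf)
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as mf:
    mf.write("\n".join(merges))

tokenizer = BioGptTokenizer(vf.name, mf.name)
print(tokenizer.tokenize("lower"))                                  # ['low', 'er</w>']
print(tokenizer.convert_tokens_to_ids(["low", "er</w>", "<unk>"]))  # [14, 15, 20]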
| 344 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
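
A minimal usage sketch of make_atom14_masks, assuming this module's own imports (torch and the residue_constants module rc) resolve; aatype holds per-residue type indices, with index 20 reserved for UNK:

# Build the atom14/atom37 index maps and masks for a random 8-residue protein.
num_res = 8
protein = {"aatype": torch.randint(0, 21, (num_res,))}
protein = make_atom14_masks(protein)
print(protein["residx_atom14_to_atom37"].shape)  # torch.Size([8, 14])
print(protein["atom14_atom_exists"].shape)       # torch.Size([8, 14])
print(protein["atom37_atom_exists"].shape)       # torch.Size([8, 37])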
| 344 | 1 |