"""Project Euler Problem 1: sum all multiples of 3 or 5 below n."""


def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    result = 0
    a = 3  # 1 and 2 are not multiples of 3 or 5, so start at 3
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
"""Add two non-negative integers using only bitwise operations."""


def add(first: int, second: int) -> int:
    """Implementation of addition via carry propagation.

    Note: this pure-Python version assumes non-negative integers; Python's
    unbounded integers can make the carry loop diverge for some negative inputs.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    >>> add(0, 0)
    0
    """
    while second != 0:
        # carry bits: positions where both numbers have a 1
        c = first & second
        # partial sum without the carry
        first ^= second
        # shift the carry left so it is added in the next iteration
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
"""Tests for the dataset inspection utilities of `datasets`."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
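
# Illustrative direct use of the helpers exercised above (network access
# required; dataset names and expected values mirror the parametrized cases):
#
#   >>> get_dataset_config_names("squad")
#   ['plain_text']
#   >>> get_dataset_split_names("squad", config_name="plain_text")
#   ['train', 'validation']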
"""A fixed-capacity circular queue backed by a Python list."""


class CircularQueue:
    """Circular FIFO queue with O(1) enqueue and dequeue."""

    def __init__(self, n: int) -> None:
        self.n = n  # maximum capacity
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0  # index one past the last element
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it, or False when empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
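
# Quick usage sketch of the queue above:
#
#   q = CircularQueue(3)
#   q.enqueue(1).enqueue(2).enqueue(3)   # enqueue returns self, so calls chain
#   len(q)        # -> 3
#   q.dequeue()   # -> 1
#   q.first()     # -> 2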
"""Tests for the `check_dummies` repo utility of transformers."""
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
"""Kandinsky 2.2 image-to-image pipeline (decoder part, conditioned on image embeddings)."""
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to(\"cuda\")\n\n        >>> prompt = \"A red cartoon frog, 4k\"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to(\"cuda\")\n\n        >>> init_image = load_image(\n        ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n        ...     \"/kandinsky/frog.png\"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save(\"red_frog.png\")\n        ```\n"


def downscale_height_and_width(height, width, scale_factor=8):
    """Round height/width so the latent grid size is an integer multiple of scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and normalize it into a (1, 3, h, w) tensor in [-1, 1]."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
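
# Quick sanity check of downscale_height_and_width with its default
# scale_factor of 8: 768 // 8**2 = 12 with no remainder, so a 768x768 request
# maps to (12 * 8, 12 * 8) = (96, 96) on the latent grid.
#
#   >>> downscale_height_and_width(768, 768)
#   (96, 96)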
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation using Kandinsky 2.2.

    Args:
        scheduler ([`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]): MoVQ image encoder and decoder.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""Solve a quadratic equation a*x**2 + b*x + c = 0 over the complex numbers."""
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: complex, b: complex, c: complex) -> tuple[complex, complex]:
    """Return the two roots of a*x**2 + b*x + c = 0."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
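
# For reference, the demo equation above, 5x^2 + 6x + 1 = 0, has discriminant
# 6*6 - 4*5*1 = 16, so the roots are (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0,
# both real, which is why quadratic_roots returns plain floats here.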
"""Lazy import structure for the `transformers.onnx` subpackage."""
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
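
# The `_LazyModule` indirection above defers the heavy submodule imports until
# an attribute is first accessed. A hedged usage sketch:
#
#   from transformers.onnx import OnnxConfig   # resolves .config lazily on first access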
"""Close, reopen, or label stale GitHub issues on the huggingface/diffusers repository."""
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
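
# This script is designed to run unattended, e.g. from a scheduled CI job.
# A hedged invocation sketch (the script path is illustrative):
#
#   GITHUB_TOKEN=<token-with-issue-write-access> python utils/stale.py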
"""Regression test for FSMT translation quality, measured with BLEU."""
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device

from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
"""Testing suite for the PyTorch SwiftFormer model."""
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
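
    # Worked example of the shape check above with the tester defaults
    # (batch_size=13, image_size=224, embed_dims=[48, 56, 112, 220]):
    # i=0 -> (13, 48, 56, 56), i=2 -> (13, 56, 28, 28),
    # i=4 -> (13, 112, 14, 14), i=6 -> (13, 220, 7, 7);
    # each embed dim appears twice because the spatial size halves every 2 blocks.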
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    """Load the fixture image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""Lazy import structure for the deprecated trajectory transformer model."""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Processor class for LayoutXLM: couples a LayoutLMv2 image processor with a LayoutXLM tokenizer."""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer
    into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
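
# Hedged usage sketch (the checkpoint name is illustrative; any LayoutXLM
# checkpoint providing both components should behave the same):
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   # encoding now carries input_ids, bbox, attention_mask and image tensors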
"""Manacher's algorithm: find the longest palindromic substring in linear time."""


def palindromic_string(input_string: str) -> str:
    """
    Return the longest palindromic substring of input_string.

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
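
# Worked example: "aba" becomes "a|b|a" after interleaving; the computed
# palindrome lengths at each center are [1, 1, 5, 1, 1], so the best center is
# the middle "b" with length 5, which maps back to "aba" after removing "|".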
"""Registry of the packaged dataset builder modules shipped with `datasets`."""
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    """Hash source lines after stripping comments, for cache invalidation."""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
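
# Quick illustration of the helper above: comment-only lines are dropped
# before hashing, so these two sources hash identically.
#
#   >>> _hash_python_lines(["x = 1", "# only a comment"]) == _hash_python_lines(["x = 1"])
#   True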
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
"""A read-only fsspec filesystem over a Hugging Face Hub dataset repository (legacy)."""
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
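
# Hedged usage sketch (`repo_info` would typically come from the Hub API):
#
#   from huggingface_hub import HfApi
#   fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"), token=None)
#   fs.ls("")  # lists the top-level files and directories of the repo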
"""Tests for the generation stopping criteria of transformers."""
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
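
# Hedged usage sketch: a StoppingCriteriaList like the ones tested above can
# be handed to `generate` directly (model and inputs are assumed to exist):
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   output_ids = model.generate(input_ids, stopping_criteria=criteria)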
"""Convert ViLT checkpoints from the original GitHub repository to Hugging Face format."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build the list of (old_name, new_name) pairs mapping original ViLT weights to HF names."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query, key and value weights."""
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the original classification head weights that have no HF counterpart."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
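
# Small illustration of rename_key: it moves a value to a new key in place.
#
#   >>> d = {"old": 1}
#   >>> rename_key(d, "old", "new")
#   >>> d
#   {'new': 1}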
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 7 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__a: Union[str, Any] = True
except (ImportError, AttributeError):
__a: Optional[Any] = object
def _UpperCAmelCase ( *a : List[str] , **a : Optional[int] ) -> str:
"""simple docstring"""
pass
__a: Optional[int] = False
__a: Tuple = logging.get_logger("transformers-cli/serving")
def _UpperCAmelCase ( a : Namespace ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Any = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(_lowerCAmelCase , args.host , args.port , args.workers )
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 4_2
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 4_2
SCREAMING_SNAKE_CASE_ : str = 4_2
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 4_2
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 4_2
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( _lowercase ) -> Union[str, Any]:
lowercase_ : int = parser.add_parser(
'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
serve_parser.add_argument(
'--task' , type=__A , choices=get_supported_tasks() , help='The task to run the pipeline on' , )
serve_parser.add_argument('--host' , type=__A , default='localhost' , help='Interface the server will listen on.' )
serve_parser.add_argument('--port' , type=__A , default=8888 , help='Port the serving will listen to.' )
serve_parser.add_argument('--workers' , type=__A , default=1 , help='Number of http workers' )
serve_parser.add_argument('--model' , type=__A , help='Model\'s name or path to stored model.' )
serve_parser.add_argument('--config' , type=__A , help='Model\'s config name or path to stored model.' )
serve_parser.add_argument('--tokenizer' , type=__A , help='Tokenizer name to use.' )
serve_parser.add_argument(
'--device' , type=__A , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
serve_parser.set_defaults(func=__A )
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
lowercase_ : Tuple = pipeline
lowercase_ : str = host
lowercase_ : List[str] = port
lowercase_ : Tuple = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install \"transformers[serving]\". '
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(f"Serving model over {host}:{port}" )
lowercase_ : Optional[int] = FastAPI(
routes=[
APIRoute(
'/' , self.model_info , response_model=__A , response_class=__A , methods=['GET'] , ),
APIRoute(
'/tokenize' , self.tokenize , response_model=__A , response_class=__A , methods=['POST'] , ),
APIRoute(
'/detokenize' , self.detokenize , response_model=__A , response_class=__A , methods=['POST'] , ),
APIRoute(
'/forward' , self.forward , response_model=__A , response_class=__A , methods=['POST'] , ),
] , timeout=600 , )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def lowerCamelCase__ ( self ) -> str:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowerCamelCase__ ( self , _lowercase = Body(__A , embed=__A ) , _lowercase = Body(__A , embed=__A ) ) -> List[Any]:
try:
lowercase_ : Tuple = self._pipeline.tokenizer.tokenize(__A )
if return_ids:
lowercase_ : Dict = self._pipeline.tokenizer.convert_tokens_to_ids(__A )
return ServeTokenizeResult(tokens=__A , tokens_ids=__A )
else:
return ServeTokenizeResult(tokens=__A )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(__A )} )
def lowerCamelCase__ ( self , _lowercase = Body(__A , embed=__A ) , _lowercase = Body(__A , embed=__A ) , _lowercase = Body(__A , embed=__A ) , ) -> Tuple:
try:
lowercase_ : List[str] = self._pipeline.tokenizer.decode(__A , __A , __A )
return ServeDeTokenizeResult(model='' , text=__A )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(__A )} )
async def lowerCamelCase__ ( self , _lowercase=Body(__A , embed=__A ) ) -> str:
# Check we don't have empty string
if len(__A ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
lowercase_ : Tuple = self._pipeline(__A )
return ServeForwardResult(output=__A )
except Exception as e:
raise HTTPException(500 , {'error': str(__A )} )
| 710 |
'''simple docstring'''
def _UpperCAmelCase ( a : list ) -> list:
"""simple docstring"""
for i in range(len(a ) - 1 , 0 , -1 ):
lowercase_ : Any = False
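        # backward pass: swap adjacent out-of-order pairs, moving smaller values toward the front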
        for j in range(i , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase_ , lowercase_ : Any = unsorted[j - 1], unsorted[j]
lowercase_ : int = True
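        # forward pass: swap adjacent out-of-order pairs, moving larger values toward the back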
        for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
lowercase_ , lowercase_ : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowercase_ : Optional[Any] = True
if not swapped:
break
return unsorted
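# e.g. cocktail_shaker_sort([4, 5, 2, 1, 2]) returns [1, 2, 2, 4, 5]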
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
A: Tuple = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7 | 0 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _UpperCAmelCase ( a : List[str] , a : Optional[int] , a : List[str] , a : Optional[Any] , a : List[str] ) -> Dict:
"""simple docstring"""
# load base model
lowercase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase_ : Union[str, Any] = load_file(__lowerCAmelCase )
lowercase_ : Optional[int] = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase_ : List[Any] = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
lowercase_ : Dict = pipeline.text_encoder
else:
lowercase_ : List[Any] = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
lowercase_ : Any = pipeline.unet
# find the target layer
lowercase_ : Optional[Any] = layer_infos.pop(0 )
while len(__lowerCAmelCase ) > -1:
try:
lowercase_ : str = curr_layer.__getattr__(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
lowercase_ : List[Any] = layer_infos.pop(0 )
elif len(__lowerCAmelCase ) == 0:
break
except Exception:
if len(__lowerCAmelCase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase_ : int = layer_infos.pop(0 )
lowercase_ : Union[str, Any] = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(__lowerCAmelCase )
else:
pair_keys.append(__lowerCAmelCase )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
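        # merged weight: W = W0 + alpha * (lora_up @ lora_down)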
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase_ : List[str] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase_ : Optional[Any] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__lowerCAmelCase , __lowerCAmelCase ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase_ : Optional[int] = state_dict[pair_keys[0]].to(torch.floataa )
lowercase_ : Any = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__lowerCAmelCase , __lowerCAmelCase )
# update visited list
for item in pair_keys:
visited.append(__lowerCAmelCase )
return pipeline
if __name__ == "__main__":
A: Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
A: int = parser.parse_args()
A: List[Any] = args.base_model_path
A: Dict = args.checkpoint_path
A: List[str] = args.dump_path
A: List[Any] = args.lora_prefix_unet
A: Optional[Any] = args.lora_prefix_text_encoder
A: Optional[Any] = args.alpha
A: Optional[int] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
A: Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ['transformers', 'torch', 'note_seq']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 7 | 0 |
import argparse
import os
import re
A: Optional[Any] = "src/diffusers"
# Pattern that looks at the indentation in a line.
A: Optional[Any] = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
A: Optional[int] = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A: str = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
A: Dict = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A: str = re.compile(r"\[([^\]]+)\]")
def _UpperCAmelCase ( a : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase_ : List[Any] = _re_indent.search(__UpperCAmelCase )
return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( a : Any , a : Any="" , a : Dict=None , a : Tuple=None ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Any = 0
lowercase_ : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__UpperCAmelCase ):
index += 1
lowercase_ : Dict = ['\n'.join(lines[:index] )]
else:
lowercase_ : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase_ : Optional[Any] = [lines[index]]
index += 1
while index < len(__UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(__UpperCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__UpperCAmelCase ) )
if index < len(__UpperCAmelCase ) - 1:
lowercase_ : Union[str, Any] = [lines[index + 1]]
index += 1
else:
lowercase_ : Union[str, Any] = []
else:
blocks.append('\n'.join(__UpperCAmelCase ) )
lowercase_ : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__UpperCAmelCase ) > 0:
blocks.append('\n'.join(__UpperCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__UpperCAmelCase ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _UpperCAmelCase ( a : Tuple ) -> Any:
"""simple docstring"""
def _inner(a : int ):
return key(__UpperCAmelCase ).lower().replace('_' , '' )
return _inner
def _UpperCAmelCase ( a : Optional[Any] , a : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
    def noop(a : Union[str, Any] ):
        return a
if key is None:
lowercase_ : List[str] = noop
# Constants are all uppercase, they go first.
lowercase_ : int = [obj for obj in objects if key(__UpperCAmelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase_ : Any = [obj for obj in objects if key(__UpperCAmelCase )[0].isupper() and not key(__UpperCAmelCase ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase_ : Dict = [obj for obj in objects if not key(__UpperCAmelCase )[0].isupper()]
lowercase_ : Any = ignore_underscore(__UpperCAmelCase )
return sorted(__UpperCAmelCase , key=__UpperCAmelCase ) + sorted(__UpperCAmelCase , key=__UpperCAmelCase ) + sorted(__UpperCAmelCase , key=__UpperCAmelCase )
def _UpperCAmelCase ( a : List[Any] ) -> int:
"""simple docstring"""
def _replace(a : Optional[int] ):
lowercase_ : Optional[int] = match.groups()[0]
if "," not in imports:
return f"[{imports}]"
lowercase_ : List[Any] = [part.strip().replace('\"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase_ : Optional[int] = keys[:-1]
return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(__UpperCAmelCase )] ) + "]"
lowercase_ : Dict = import_statement.split('\n' )
if len(__UpperCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase_ : Optional[Any] = 2 if lines[1].strip() == '[' else 1
lowercase_ : Any = [(i, _re_strip_line.search(__UpperCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowercase_ : Tuple = sort_objects(__UpperCAmelCase , key=lambda a : a[1] )
lowercase_ : int = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__UpperCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase_ : Dict = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase_ : Dict = [part.strip().replace('\"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase_ : Optional[int] = keys[:-1]
lowercase_ : List[Any] = get_indent(lines[1] ) + ', '.join([f"\"{k}\"" for k in sort_objects(__UpperCAmelCase )] )
return "\n".join(__UpperCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
lowercase_ : List[str] = _re_bracket_content.sub(_replace , __UpperCAmelCase )
return import_statement
def _UpperCAmelCase ( a : Tuple , a : List[str]=True ) -> str:
"""simple docstring"""
with open(__UpperCAmelCase , 'r' ) as f:
lowercase_ : List[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase_ : Union[str, Any] = split_code_in_indented_blocks(
__UpperCAmelCase , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__UpperCAmelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase_ : List[str] = main_blocks[block_idx]
lowercase_ : List[str] = block.split('\n' )
# Get to the start of the imports.
lowercase_ : Tuple = 0
while line_idx < len(__UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase_ : List[Any] = len(__UpperCAmelCase )
else:
line_idx += 1
if line_idx >= len(__UpperCAmelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase_ : Any = '\n'.join(block_lines[line_idx:-1] )
lowercase_ : Dict = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowercase_ : List[str] = split_code_in_indented_blocks(__UpperCAmelCase , indent_level=__UpperCAmelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase_ : Tuple = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase_ : int = [(pattern.search(__UpperCAmelCase ).groups()[0] if pattern.search(__UpperCAmelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase_ : List[str] = [(i, key) for i, key in enumerate(__UpperCAmelCase ) if key is not None]
        lowercase_ : Optional[Any] = [x[0] for x in sorted(__UpperCAmelCase , key=lambda a : a[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase_ : Any = 0
lowercase_ : Dict = []
for i in range(len(__UpperCAmelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowercase_ : List[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__UpperCAmelCase )
count += 1
# And we put our main block back together with its first and last line.
lowercase_ : Tuple = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__UpperCAmelCase ):
if check_only:
return True
else:
print(f"Overwriting {file}." )
with open(__UpperCAmelCase , 'w' ) as f:
f.write('\n'.join(__UpperCAmelCase ) )
def _UpperCAmelCase ( a : int=True ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Tuple = []
for root, _, files in os.walk(__UpperCAmelCase ):
if "__init__.py" in files:
lowercase_ : List[str] = sort_imports(os.path.join(__UpperCAmelCase , '__init__.py' ) , check_only=__UpperCAmelCase )
if result:
lowercase_ : List[str] = [os.path.join(__UpperCAmelCase , '__init__.py' )]
if len(__UpperCAmelCase ) > 0:
raise ValueError(f"Would overwrite {len(__UpperCAmelCase )} files, run `make style`." )
if __name__ == "__main__":
A: Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
A: int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 712 |
'''simple docstring'''
def _UpperCAmelCase ( a : str , a : str ) -> float:
"""simple docstring"""
def get_matched_characters(a : str , a : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
lowercase_ : Optional[int] = int(max(0 , i - limit ) )
lowercase_ : Union[str, Any] = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(a )
lowercase_ : Union[str, Any] = f"{_stra[0:_stra.index(a )]} {_stra[_stra.index(a ) + 1:]}"
return "".join(a )
# matching characters
lowercase_ : Union[str, Any] = get_matched_characters(a , a )
lowercase_ : Optional[Any] = get_matched_characters(a , a )
lowercase_ : Optional[int] = len(a )
# transposition
lowercase_ : Dict = (
len([(ca, ca) for ca, ca in zip(a , a ) if ca != ca] ) // 2
)
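    # Jaro similarity: average of m/len(str1), m/len(str2) and (m - t)/m, with m matched characters and t transpositions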
if not match_count:
lowercase_ : List[str] = 0.0
else:
lowercase_ : Any = (
1
/ 3
* (
match_count / len(a )
+ match_count / len(a )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowercase_ : Optional[Any] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 7 | 0 |
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
if not all(char in '01' for char in bin_string ):
raise ValueError('Non-binary value was passed to the function' )
if not bin_string:
raise ValueError('Empty string was passed to the function' )
lowercase_ : Tuple = ''
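    # left-pad the binary string with zeros so its length is a multiple of three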
while len(lowerCAmelCase__ ) % 3 != 0:
lowercase_ : str = '0' + bin_string
lowercase_ : int = [
bin_string[index : index + 3]
for index in range(len(lowerCAmelCase__ ) )
if index % 3 == 0
]
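    # convert each 3-bit group to its octal digit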
for bin_group in bin_string_in_3_list:
lowercase_ : List[Any] = 0
for index, val in enumerate(lowerCAmelCase__ ):
oct_val += int(2 ** (2 - index) * int(lowerCAmelCase__ ) )
oct_string += str(lowerCAmelCase__ )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 713 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( a : int = 4 ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : Tuple = abs(a ) or 4
return [[1 + x + y * row_size for x in range(a )] for y in range(a )]
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(transpose(a ) )
# OR.. transpose(reverse_column(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(reverse_column(a ) )
# OR.. reverse_column(reverse_row(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_column(transpose(a ) )
# OR.. transpose(reverse_row(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : Any = [list(a ) for x in zip(*a )]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : List[str] = matrix[::-1]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : str = [x[::-1] for x in matrix]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> None:
"""simple docstring"""
for i in matrix:
print(*a )
if __name__ == "__main__":
A: Dict = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_aa(matrix))
A: List[Any] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_aaa(matrix))
A: List[str] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_aaa(matrix))
| 7 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A: List[Any] = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( __lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = GPTSwaTokenizer
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : str = False
def lowerCamelCase__ ( self ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Dict = GPTSwaTokenizer(_UpperCamelCase , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self , _lowercase ) -> Union[str, Any]:
lowercase_ : Optional[int] = """This is a test"""
lowercase_ : Any = """This is a test"""
return input_text, output_text
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : List[str] = """<s>"""
lowercase_ : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_UpperCamelCase ) , 2000 )
def lowerCamelCase__ ( self ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Optional[Any] = GPTSwaTokenizer(_UpperCamelCase )
lowercase_ : Optional[int] = tokenizer.tokenize('This is a test' )
self.assertListEqual(_UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [465, 287, 265, 631, 842] )
lowercase_ : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
_UpperCamelCase , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowercase_ : Any = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowercase_ : List[Any] = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
# fmt: off
self.assertListEqual(
_UpperCamelCase , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def lowerCamelCase__ ( self ) -> str:
lowercase_ : List[str] = GPTSwaTokenizer(_UpperCamelCase )
lowercase_ : Optional[int] = ["""This is a test""", """I was born in 92000, and this is falsé."""]
lowercase_ : Optional[Any] = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertListEqual(tokenizer.encode_fast(_UpperCamelCase ) , _UpperCamelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(tokenizer.decode_fast(_UpperCamelCase ) , _UpperCamelCase )
@slow
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Dict = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
lowercase_ : List[str] = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase , model_name='AI-Sweden/gpt-sw3-126m' , sequences=_UpperCamelCase , )
| 714 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _UpperCAmelCase ( a : Dict , a : Optional[int] , a : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Any = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowercase_ : List[str] = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
lowercase_ : Optional[Any] = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(a , exist_ok=a )
lowercase_ : int = os.path.join(a , 'README.md' )
print(f"Generating {path}" )
with open(a , 'w' , encoding='utf-8' ) as f:
f.write(a )
# make sure we are under the root of the project
A: List[str] = Path(__file__).resolve().parent.parent.parent
A: List[str] = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A , A , A: Any = model_name.split("-")
A: int = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _UpperCAmelCase ( a : Union[str, Any] , a : Union[str, Any] , a : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase_ : Optional[Any] = 0
if start < end:
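        # pick a random pivot and swap it with the last element of the slice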
lowercase_ : Optional[Any] = randint(_lowerCamelCase , _lowerCamelCase )
lowercase_ : Dict = a[end]
lowercase_ : int = a[pivot]
lowercase_ : List[str] = temp
        lowercase_ , lowercase_ : Any = _in_place_partition(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
count += _in_place_quick_sort(_lowerCamelCase , _lowerCamelCase , p - 1 )
count += _in_place_quick_sort(_lowerCamelCase , p + 1 , _lowerCamelCase )
return count
def _UpperCAmelCase ( a : List[str] , a : str , a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : Tuple = 0
lowercase_ : Union[str, Any] = randint(_lowerCamelCase , _lowerCamelCase )
lowercase_ : List[str] = a[end]
lowercase_ : Tuple = a[pivot]
lowercase_ : str = temp
lowercase_ : Tuple = start - 1
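    # walk the slice, growing the left region of elements smaller than the pivot and counting comparisons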
for index in range(_lowerCamelCase , _lowerCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowercase_ : str = new_pivot_index + 1
lowercase_ : Tuple = a[new_pivot_index]
lowercase_ : Optional[Any] = a[index]
lowercase_ : int = temp
lowercase_ : str = a[new_pivot_index + 1]
lowercase_ : List[Any] = a[end]
lowercase_ : Tuple = temp
return new_pivot_index + 1, count
A: Optional[Any] = TemporaryFile()
A: Tuple = 1_0_0 # 100 elements are to be sorted
A , A: List[str] = 0, 1 # mean and standard deviation
A: Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
A: str = np.load(outfile)
A: List[str] = len(M) - 1
A: List[Any] = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 715 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
def _UpperCAmelCase ( a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[str] = git.Repo(search_parent_directories=a )
lowercase_ : Union[str, Any] = {
'repo_id': str(a ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(a , 'git_log.json' ) , 'w' ) as f:
json.dump(a , a , indent=4 )
def _UpperCAmelCase ( a : str ) -> Union[str, Any]:
"""simple docstring"""
if params.n_gpu <= 0:
lowercase_ : int = 0
lowercase_ : Union[str, Any] = -1
lowercase_ : List[str] = True
lowercase_ : Optional[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase_ : Dict = int(os.environ['WORLD_SIZE'] )
lowercase_ : Union[str, Any] = int(os.environ['N_GPU_NODE'] )
lowercase_ : Optional[int] = int(os.environ['RANK'] )
# number of nodes / node ID
lowercase_ : int = params.world_size // params.n_gpu_per_node
lowercase_ : str = params.global_rank // params.n_gpu_per_node
lowercase_ : Dict = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase_ : str = 1
lowercase_ : Dict = 0
lowercase_ : Tuple = 0
lowercase_ : List[Any] = 0
lowercase_ : int = 1
lowercase_ : Tuple = 1
lowercase_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase_ : List[str] = params.node_id == 0 and params.local_rank == 0
lowercase_ : Optional[Any] = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _UpperCAmelCase ( a : Dict ) -> Optional[int]:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 7 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A: Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
A: int = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
A: List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _UpperCAmelCase ( a : Optional[int] ) -> Optional[int]:
"""simple docstring"""
with open(_lowerCamelCase , 'rb' ) as f:
lowercase_ : str = Image.open(_lowerCamelCase )
return im.convert('RGB' )
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = field(
default=_A, metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
}, )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = field(
default=_A, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE_ : Optional[Any] = field(default=_A, metadata={'help': 'A folder containing the training data.'} )
SCREAMING_SNAKE_CASE_ : int = field(default=_A, metadata={'help': 'A folder containing the validation data.'} )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = field(
default=0.15, metadata={'help': 'Percent to split off of train for validation.'} )
SCREAMING_SNAKE_CASE_ : List[str] = field(
default=_A, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
SCREAMING_SNAKE_CASE_ : List[Any] = field(
default=_A, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
def lowerCamelCase__ ( self ) -> List[Any]:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = field(
default='google/vit-base-patch16-224-in21k', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}, )
SCREAMING_SNAKE_CASE_ : Tuple = field(
default=_A, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_A )}, )
SCREAMING_SNAKE_CASE_ : List[Any] = field(
default=_A, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE_ : Tuple = field(
default=_A, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
SCREAMING_SNAKE_CASE_ : str = field(default=_A, metadata={'help': 'Name or path of preprocessor config.'} )
SCREAMING_SNAKE_CASE_ : int = field(
default=_A, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=_A, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'}, )
def _UpperCAmelCase ( a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Dict = torch.stack([example['pixel_values'] for example in examples] )
lowercase_ : Union[str, Any] = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def _UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        lowercase_ , lowercase_ , lowercase_ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        lowercase_ , lowercase_ , lowercase_ : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , _lowerCamelCase , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase_ : int = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowercase_ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ : List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowercase_ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase_ : Union[str, Any] = {}
if data_args.train_dir is not None:
lowercase_ : Union[str, Any] = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
lowercase_ : Optional[Any] = os.path.join(data_args.validation_dir , '**' )
lowercase_ : Any = load_dataset(
'imagefolder' , data_files=_lowerCamelCase , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase_ : Union[str, Any] = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _lowerCamelCase ) and data_args.train_val_split > 0.0:
lowercase_ : int = dataset["train"].train_test_split(data_args.train_val_split )
lowercase_ : Tuple = split["train"]
lowercase_ : List[Any] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase_ : str = dataset["train"].features["labels"].names
    lowercase_ , lowercase_ : int = {}, {}
for i, label in enumerate(_lowerCamelCase ):
lowercase_ : Union[str, Any] = str(_lowerCamelCase )
lowercase_ : Dict = label
# Load the accuracy metric from the datasets package
lowercase_ : Any = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(a : Optional[Any] ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowercase_ : str = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel=_lowerCamelCase , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase_ : str = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowercase_ : Union[str, Any] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase_ : Union[str, Any] = image_processor.size["shortest_edge"]
else:
lowercase_ : int = (image_processor.size["height"], image_processor.size["width"])
lowercase_ : int = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase_ : Optional[Any] = Compose(
[
RandomResizedCrop(_lowerCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase_ : List[str] = Compose(
[
Resize(_lowerCamelCase ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(a : Tuple ):
lowercase_ : Union[str, Any] = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(a : Optional[Any] ):
lowercase_ : Tuple = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowercase_ : List[str] = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_lowerCamelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowercase_ : Tuple = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_lowerCamelCase )
    # Initialize our trainer
lowercase_ : Optional[int] = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , )
# Training
if training_args.do_train:
lowercase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowercase_ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase_ : int = last_checkpoint
lowercase_ : int = trainer.train(resume_from_checkpoint=_lowerCamelCase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase_ : str = trainer.evaluate()
trainer.log_metrics('eval' , _lowerCamelCase )
trainer.save_metrics('eval' , _lowerCamelCase )
# Write model card and (optionally) push to hub
lowercase_ : Any = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCamelCase )
else:
trainer.create_model_card(**_lowerCamelCase )
if __name__ == "__main__":
main()
| 716 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return value
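

# Minimal usage sketch (the environment variable names below are illustrative):
#   os.environ["MY_DEBUG"] = "1"
#   parse_flag_from_env("MY_DEBUG")                      # -> True
#   get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)  # first key that is set wins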
| 7 | 0 |
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    """simple docstring"""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
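

# Quick sanity checks for jump_search (the sample array is ours, purely illustrative;
# the input must be sorted, and the sqrt(n) block size gives O(sqrt(n)) comparisons):
assert jump_search([0, 1, 3, 4, 7, 9, 11], 7) == 4
assert jump_search([0, 1, 3, 4, 7, 9, 11], 8) == -1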
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f"""Number {x} is at index {res}""")
| 717 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
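

# Worked example for intersect_and_union (values chosen for illustration):
#   pred_label = [[0, 1], [1, 1]], label = [[0, 1], [0, 1]], num_labels = 2, ignore_index = 255
# gives per-class intersection [1, 2] and union [2, 3], hence per-class IoU [0.5, 0.667].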
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """simple docstring"""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                    'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                }
            ),
            reference_urls=[
                'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 7 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    """simple docstring"""

    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg'
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type='numpy',
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'vit'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
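

# Minimal usage sketch (illustrative; builds a randomly initialised model from this config):
#   from transformers import ViTConfig, ViTModel
#   config = ViTConfig(image_size=224, patch_size=16)
#   model = ViTModel(config)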
| 7 | 0 |
class Node:
    """simple docstring"""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    """simple docstring"""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
A: Any = Node("R", -1)
A: Any = Node("B", 6)
A: Optional[int] = Node("A", 3)
A: List[str] = Node("X", 1)
A: int = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
A: str = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function='gelu_pytorch_tanh',
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
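

# The attribute_map above routes generic config names to the GPT-2 style fields,
# e.g. (illustrative):
#   config = GPTBigCodeConfig(n_embd=2048)
#   config.hidden_size  # -> 2048, resolved through attribute_map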
| 7 | 0 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """simple docstring"""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    """simple docstring"""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    """simple docstring"""
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """simple docstring"""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """simple docstring"""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """simple docstring"""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """simple docstring"""
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def _UpperCAmelCase ( a : List[Any] ) -> Any:
"""simple docstring"""
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
A: int = tf.keras.activations.gelu
A: Optional[Any] = approximate_gelu_wrap
else:
A: List[Any] = _gelu
A: Any = _gelu_new
A: Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def _UpperCAmelCase ( a : Any ) -> List[Any]:
"""simple docstring"""
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
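

# Quick usage sketch (values are illustrative): string names resolve through ACT2FN,
# and gelu(1.0) is approximately 0.8413, e.g.
#   act = get_tf_activation("gelu_fast")
#   act(tf.constant([0.0, 1.0]))  # -> approx [0.0, 0.8412]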
| 720 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    """simple docstring"""

    def test_all_is_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 7 | 0 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    """simple docstring"""
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('glue', 'mrpc')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows), datasets['train']['label'])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('Average test metrics from all folds:', test_metric)
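

# Note on the ensembling step above: the per-fold test logits are soft-voted by
# averaging before the argmax, i.e. with k folds and logits l_1..l_k the predicted
# class is argmax((l_1 + ... + l_k) / k).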
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    # New Code #
    parser.add_argument('--num_folds', type=int, default=3, help='The number of splits to perform across the dataset')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 721 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(a , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : List[Any] = f.readlines()
lowercase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
lowercase_ : Dict = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    """simple docstring"""

    def __init__(self, text, pattern):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char):
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos):
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self):
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
A: Union[str, Any] = "ABAABA"
A: Tuple = "AB"
A: int = BoyerMooreSearch(text, pattern)
A: List[str] = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
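
# For the inputs above ("ABAABA", "AB") the heuristic reports matches at offsets
# 0 and 3, i.e. it prints "Pattern found in following positions:" followed by [0, 3].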
| 700 |
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """simple docstring"""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
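

# Worked example (illustrative data; the top-level function name procentual_proximity
# is our restoration): rows are [price, storage, camera] and the weights mark price as
# "lower is better" (0) and the rest as "higher is better" (1):
#   procentual_proximity([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 1, 1])
# appends one combined score per row, e.g. the first row gains 1.0 + 0.25 + 0.25 = 1.5.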
| 7 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'roberta'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
| 701 |
'''simple docstring'''
def add(first: int, second: int) -> int:
    """simple docstring"""
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
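

# Illustrative checks of the carry-propagation loop (sample values are ours; note that
# with Python's unbounded ints the loop only terminates for non-negative operands):
assert add(3, 5) == 8
assert add(25, 17) == 42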
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type='divided_space_time',
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
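        # With the defaults above: num_patches_per_frame = (10 // 2) ** 2 = 25, so
        # seq_length = 2 * 25 + 1 = 51 (one CLS token plus all space-time patches).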
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='TimeSformer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict['output_attentions']
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    """simple docstring"""
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset'
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400').to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 702 |
'''simple docstring'''
class CircularQueue:
    """simple docstring"""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
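

# Minimal usage sketch (capacity and values are illustrative):
#   q = CircularQueue(3)
#   q.enqueue(1).enqueue(2)   # enqueue returns self, so calls can be chained
#   q.first()                 # -> 1
#   q.dequeue()               # -> 1; front advances modulo the capacity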
| 7 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone='resnet50',
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def lowerCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def lowerCamelCase__ ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def lowerCamelCase__ ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowerCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowerCamelCase__ ( self ) -> str:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def lowerCamelCase__ ( self ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowerCamelCase__ ( self ) -> int:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowerCamelCase__ ( self ) -> List[str]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowerCamelCase__ ( self ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowerCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def lowerCamelCase__ ( self ) -> Tuple:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def lowerCamelCase__ ( self ) -> Tuple:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def lowerCamelCase__ ( self ) -> List[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__ ( self ) -> Tuple:
pass
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(_lowercase )
lowercase_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Optional[int] = [*signature.parameters.keys()]
lowercase_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
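# Backprop from the last feature map and check that gradients reach the retained
# hidden states (and attentions, when the model has them).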
lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Any = True
lowercase_ : Optional[int] = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowercase_ : str = self.all_model_classes[0]
lowercase_ : str = model_class(_lowercase )
model.to(_lowercase )
lowercase_ : Any = self._prepare_for_class(_lowercase , _lowercase )
lowercase_ : int = model(**_lowercase )
lowercase_ : int = outputs[0][-1]
# Encoder-/Decoder-only models
lowercase_ : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowercase_ : List[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_lowercase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : int = model(**_lowercase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowercase_ : Optional[int] = copy.deepcopy(_lowercase )
lowercase_ : str = None
lowercase_ : Tuple = model_class(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Union[str, Any] = model(**_lowercase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowercase_ : Optional[Any] = copy.deepcopy(_lowercase )
lowercase_ : Optional[Any] = False
lowercase_ : Any = model_class(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : List[Any] = model(**_lowercase )
| 703 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def _UpperCAmelCase ( a : Tuple , a : Union[str, Any] , a : List[Any]=8 ) -> Dict:
"""simple docstring"""
lowercase_ : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _UpperCAmelCase ( a : Any , a : Dict=5_1_2 , a : Optional[Any]=5_1_2 ) -> Tuple:
"""simple docstring"""
lowercase_ : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : int = np.array(pil_image.convert('RGB' ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 1_27.5 - 1
lowercase_ : Any = np.transpose(a , [2, 0, 1] )
lowercase_ : Any = torch.from_numpy(a ).unsqueeze(0 )
return image
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
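# strength in (0, 1] sets the fraction of the schedule actually run: 1.0 denoises
# from almost pure noise, smaller values preserve more of the input image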
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
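# Encode the init image with the movq VQ model, scale by its scaling_factor, then
# noise it to the starting timestep so denoising can resume partway through the schedule.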
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
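# Classifier-free guidance doubles the batch below: an unconditional and a conditioned
# prediction are produced together and mixed with guidance_scale in the denoising loop.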
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
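# For learned-variance schedulers the UNet output packs noise and variance channels;
# guidance is applied to the noise half only and the text-conditioned variance is re-attached.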
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 7 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( a : int = 4_0_0_0_0_0_0 ) -> Any:
"""simple docstring"""
lowercase_ : Dict = [0, 1]
lowercase_ : str = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowercase_ : Optional[int] = 0
for j in range(len(fib ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A: int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
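# The mapping above is consumed by _LazyModule below, so each submodule is only
# imported on first attribute access.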
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
A: int = logging.get_logger(__name__)
A: str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A: List[Any] = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
A: Tuple = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
A: List[Any] = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class __magic_name__ ( __a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : Optional[int] = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE_ : List[Any] = DistilBertTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase="[UNK]" , _lowercase="[SEP]" , _lowercase="[PAD]" , _lowercase="[CLS]" , _lowercase="[MASK]" , _lowercase=True , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
super().__init__(
a_ , tokenizer_file=a_ , do_lower_case=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , tokenize_chinese_chars=a_ , strip_accents=a_ , **a_ , )
lowercase_ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
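# If the serialized normalizer state disagrees with the arguments passed here,
# rebuild the backend normalizer so the constructor arguments take precedence.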
if (
normalizer_state.get('lowercase' , a_ ) != do_lower_case
or normalizer_state.get('strip_accents' , a_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , a_ ) != tokenize_chinese_chars
):
lowercase_ : Optional[Any] = getattr(a_ , normalizer_state.pop('type' ) )
lowercase_ : List[Any] = do_lower_case
lowercase_ : Optional[Any] = strip_accents
lowercase_ : Optional[int] = tokenize_chinese_chars
lowercase_ : Dict = normalizer_class(**a_ )
lowercase_ : int = do_lower_case
def lowerCamelCase__ ( self , _lowercase , _lowercase=None ) -> List[Any]:
lowercase_ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[str]:
lowercase_ : List[Any] = [self.sep_token_id]
lowercase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Optional[int]:
lowercase_ : Optional[Any] = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
| 705 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A: Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
A: List[Any] = json.load(f)
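# bleu_data maps each language pair (e.g. "en-ru") to its "src" and "tgt" sentence lists.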
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase ) -> Tuple:
return FSMTTokenizer.from_pretrained(_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
lowercase_ : str = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(_lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
# note: this test is not testing the best performance since it only evaluates a small batch,
# but it should be enough to detect a regression in the output quality
lowercase_ : Optional[Any] = f"facebook/wmt19-{pair}"
lowercase_ : str = self.get_tokenizer(_lowercase )
lowercase_ : Any = self.get_model(_lowercase )
lowercase_ : Any = bleu_data[pair]['src']
lowercase_ : Any = bleu_data[pair]['tgt']
lowercase_ : Dict = tokenizer(_lowercase , return_tensors='pt' , truncation=_lowercase , padding='longest' ).to(_lowercase )
lowercase_ : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowercase_ : Any = tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
lowercase_ : Union[str, Any] = calculate_bleu(_lowercase , _lowercase )
print(_lowercase )
self.assertGreaterEqual(scores['bleu'] , _lowercase )
| 7 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _UpperCAmelCase ( a : Any , a : Optional[Any] , a : Optional[Any] , a : List[str] , a : Optional[int] = None , a : Optional[Any] = None , a : Dict = None , ) -> Tuple:
"""simple docstring"""
if config_name_or_path is None:
lowercase_ : Dict = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
if generator_tokenizer_name_or_path is None:
lowercase_ : Optional[Any] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowercase_ : Any = question_encoder_name_or_path
lowercase_ : Tuple = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
# Save model.
lowercase_ : str = RagConfig.from_pretrained(__snake_case )
lowercase_ : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
lowercase_ : Any = AutoConfig.from_pretrained(__snake_case )
lowercase_ : Optional[Any] = gen_config
lowercase_ : List[Any] = question_encoder_config
lowercase_ : Tuple = model_class.from_pretrained_question_encoder_generator(
__snake_case , __snake_case , config=__snake_case )
rag_model.save_pretrained(__snake_case )
# Sanity check.
model_class.from_pretrained(__snake_case )
# Save tokenizers.
lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained(__snake_case )
gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
lowercase_ : Tuple = AutoTokenizer.from_pretrained(__snake_case )
question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
A: List[str] = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
A: int = parser.parse_args()
A: Tuple = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: int = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Tuple = logging.get_logger(__name__)
A: Union[str, Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __magic_name__ ( _UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 'megatron-bert'
def __init__( self , _lowercase=2_9056 , _lowercase=1024 , _lowercase=24 , _lowercase=16 , _lowercase=4096 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=0 , _lowercase="absolute" , _lowercase=True , **_lowercase , ) -> List[str]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
lowercase_ : int = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Tuple = hidden_act
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Any = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : List[Any] = max_position_embeddings
lowercase_ : Tuple = type_vocab_size
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : str = layer_norm_eps
lowercase_ : List[Any] = position_embedding_type
lowercase_ : List[Any] = use_cache
| 707 |
'''simple docstring'''
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
lowercase_ : Dict = 0
# if input_string is "aba" than new_input_string become "a|b|a"
lowercase_ : Dict = ''
lowercase_ : Any = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
lowercase_ , lowercase_ : Dict = 0, 0
# length[i] shows the length of palindromic substring with center i
lowercase_ : List[Any] = [1 for i in range(len(new_input_string ) )]
# for each character in new_string find corresponding palindromic string
lowercase_ : Dict = 0
for j in range(len(new_input_string ) ):
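# Manacher's trick: seed k from the mirror position (l + r - j) inside the current
# palindromic window [l, r] instead of always expanding from scratch.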
lowercase_ : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(new_input_string )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase_ : int = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
lowercase_ : Tuple = j - k + 1 # noqa: E741
lowercase_ : Tuple = j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase_ : Tuple = length[j]
lowercase_ : List[Any] = j
# create that string
lowercase_ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( a__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = CLIPTokenizer
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : int = {}
SCREAMING_SNAKE_CASE_ : int = False
def snake_case__ ( self ) -> Any:
super().setUp()
# fmt: off
lowercase_ : Dict = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
lowercase_ : Optional[int] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
lowercase_ : List[str] = {"unk_token": "<unk>"}
lowercase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase__ ) )
def snake_case__ ( self , **_lowercase ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def snake_case__ ( self , **_lowercase ) -> int:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def snake_case__ ( self , _lowercase ) -> Optional[int]:
lowercase_ : List[Any] = "lower newer"
lowercase_ : int = "lower newer"
return input_text, output_text
def snake_case__ ( self ) -> str:
lowercase_ : Optional[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase_ : List[Any] = "lower newer"
lowercase_ : Optional[int] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
lowercase_ : List[str] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase_ : List[Any] = tokens + [tokenizer.unk_token]
lowercase_ : List[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
@require_ftfy
def snake_case__ ( self ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase_ : List[str] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase_ : Any = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
lowercase_ : str = tokenizer_s.tokenize(lowerCAmelCase__ )
lowercase_ : Union[str, Any] = tokenizer_r.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowercase_ : List[Any] = "xa\u0303y" + " " + "x\xe3y"
lowercase_ : str = tokenizer_s.tokenize(lowerCAmelCase__ )
lowercase_ : Optional[int] = tokenizer_r.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Test that the tokenization is identical on unicode of space type
lowercase_ : Any = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowercase_ : Any = tokenizer_s.tokenize(lowerCAmelCase__ )
lowercase_ : Dict = tokenizer_r.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Test that the tokenization is identical on unicode of line break type
lowercase_ : Any = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowercase_ : List[str] = tokenizer_s.tokenize(lowerCAmelCase__ )
lowercase_ : Tuple = tokenizer_r.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self ) -> List[str]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase_ : Tuple = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase_ : Tuple = f"{text_of_1_token} {text_of_1_token}"
lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , )
lowercase_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
lowercase_ : Optional[Any] = f" {text}"
lowercase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , )
lowercase_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
def snake_case__ ( self ) -> Union[str, Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case__ ( self ) -> Union[str, Any]:
super().test_tokenization_python_rust_equals()
def snake_case__ ( self ) -> List[str]:
# CLIP always lower cases letters
pass
| 708 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
def lowerCamelCase__ ( self ) -> Optional[Any]:
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple:
self._get_dirs()
lowercase_ : str = self._strip_protocol(_lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
self._get_dirs()
lowercase_ : List[str] = PurePosixPath(path.strip('/' ) )
lowercase_ : List[str] = {}
for p, f in self.dir_cache.items():
lowercase_ : Tuple = PurePosixPath(p.strip('/' ) )
lowercase_ : Optional[int] = p.parent
if root == path:
lowercase_ : List[str] = f
lowercase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 7 | 0 |
'''simple docstring'''
A: str = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
1_0: '''a''',
1_1: '''b''',
1_2: '''c''',
1_3: '''d''',
1_4: '''e''',
1_5: '''f''',
}
def _UpperCAmelCase ( a : float ) -> int:
"""simple docstring"""
assert type(UpperCAmelCase__ ) in (int, float) and decimal == int(UpperCAmelCase__ )
lowercase_ : Dict = int(UpperCAmelCase__ )
lowercase_ : List[Any] = ''
lowercase_ : List[Any] = False
if decimal < 0:
lowercase_ : int = True
decimal *= -1
while decimal > 0:
lowercase_ , lowercase_ : Optional[int] = divmod(UpperCAmelCase__ , 1_6 )
lowercase_ : List[str] = values[remainder] + hexadecimal
lowercase_ : List[Any] = '0x' + hexadecimal
if negative:
lowercase_ : Union[str, Any] = '-' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : Optional[int] = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = in_proj_bias[: config.hidden_size]
lowercase_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(a , a )
def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = dct.pop(a )
lowercase_ : Dict = val
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
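# ViLT-B/32 uses a 384 base image size with 32x32 patches, matching the fixed config above.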
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class __magic_name__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = MBartConfig
SCREAMING_SNAKE_CASE_ : List[Any] = {}
SCREAMING_SNAKE_CASE_ : str = """gelu"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=False , _lowercase=99 , _lowercase=32 , _lowercase=2 , _lowercase=4 , _lowercase=37 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=20 , _lowercase=2 , _lowercase=1 , _lowercase=0 , ) -> Optional[Any]:
lowercase_ : Union[str, Any] = parent
lowercase_ : List[str] = batch_size
lowercase_ : Union[str, Any] = seq_length
lowercase_ : Optional[Any] = is_training
lowercase_ : int = use_labels
lowercase_ : Dict = vocab_size
lowercase_ : Optional[Any] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : Optional[Any] = num_attention_heads
lowercase_ : List[str] = intermediate_size
lowercase_ : int = hidden_dropout_prob
lowercase_ : int = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : Tuple = eos_token_id
lowercase_ : int = pad_token_id
lowercase_ : List[str] = bos_token_id
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase_ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase_ : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Dict = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ : int = prepare_mbart_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Any:
lowercase_ : Any = TFMBartModel(config=UpperCAmelCase_ ).get_decoder()
lowercase_ : Optional[int] = inputs_dict['input_ids']
lowercase_ : Dict = input_ids[:1, :]
lowercase_ : Union[str, Any] = inputs_dict['attention_mask'][:1, :]
lowercase_ : Optional[Any] = inputs_dict['head_mask']
lowercase_ : int = 1
# first forward pass
lowercase_ : Dict = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
lowercase_ , lowercase_ : Dict = outputs.to_tuple()
lowercase_ : List[str] = past_key_values[1]
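# NOTE: the remainder of this check is missing from this sample; the lines below are a
# sketch reconstructed from the usual TF Bart-family test pattern (an assumption, not
# this file's original code): run one cached decoding step and compare it against an
# uncached forward pass over the extended sequence.
lowercase_ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ : str = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase_ : Union[str, Any] = model(next_input_ids )[0]
lowercase_ : Any = model(next_tokens , past_key_values=past_key_values )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
tf.debugging.assert_near(output_from_no_past[:, -3:] , output_from_past , rtol=1E-3 )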
def _UpperCAmelCase ( a : Tuple , a : Dict , a : Optional[Any] , a : Optional[int]=None , a : Any=None , a : List[str]=None , a : int=None , a : Tuple=None , ) -> List[str]:
"""simple docstring"""
if attention_mask is None:
lowercase_ : Optional[Any] = tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase_ : List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowercase_ : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase_ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase_ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : Dict = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Any:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
    def setUp( self ) -> int:
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )
    def test_config( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
    expected_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
    model_name = "facebook/mbart-large-en-ro"
@cached_property
    def tokenizer( self ) -> Optional[int]:
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ) -> str:
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ) -> Optional[Any]:
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )
    def translate_src_text( self , **tokenizer_kwargs ) -> List[Any]:
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation_en_ro( self ) -> List[Any]:
        self._assert_generated_batch_equal_expected()
| 710 |
'''simple docstring'''
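# Cocktail shaker sort (bidirectional bubble sort): each outer iteration makes a
# backward pass that sinks the smallest remaining element and a forward pass that
# bubbles the largest one up, stopping early once a full sweep makes no swap.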
def _UpperCAmelCase ( unsorted : list ) -> list:
    """simple docstring"""
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
    A: Tuple = [int(item) for item in A.split(",")]
    print(f"""{_UpperCAmelCase(A) = }""")
| 7 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __magic_name__ ( BaseOutput ):
    """simple docstring"""
    images : Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ['transformers', 'torch', 'note_seq']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 7 | 0 |
def _UpperCAmelCase ( number : int , iterations : int ) -> str:
    """simple docstring"""
    if not isinstance(iterations , int ):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            'starting number must be\n and integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
    out = ''
while number <= iterations:
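        # the two independent checks make multiples of 15 emit "FizzBuzz"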
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
            out += str(number )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
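# Jaro-Winkler similarity: the Jaro score averages the match ratio of both strings
# with a transposition penalty; Winkler then adds 0.1 per shared prefix character
# (at most four), scaled by the remaining distance (1 - jaro).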
def _UpperCAmelCase ( stra : str , strb : str ) -> float:
    """simple docstring"""
    def get_matched_characters(_stra : str , _strb : str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f"{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(_UpperCAmelCase("hello", "world"))
| 7 | 0 |
from __future__ import annotations
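# Rat-in-a-maze solver: depth-first backtracking marks cells of the candidate
# path in a parallel ``solutions`` grid and unmarks them when a branch dead-ends.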
def _UpperCAmelCase ( maze : list[list[int]] ) -> bool:
    """simple docstring"""
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('\n'.join(str(row ) for row in solutions ) )
    else:
        print('No solution exists!' )
    return solved
def run_maze ( maze : list[list[int]] , i : int , j : int , solutions : list[list[int]] ) -> bool:
    """simple docstring"""
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
'''simple docstring'''
from __future__ import annotations
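# All three rotations are built from two primitives: a 90-degree counterclockwise
# rotation is a transpose followed by reversing the row order, and composing the
# primitives differently yields the 180- and 270-degree variants.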
def make_matrix ( row_size : int = 4 ) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90 ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180 ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270 ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix
def reverse_column ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix ( matrix : list[list[int]] ) -> None:
    """simple docstring"""
    for i in matrix:
        print(*i )
if __name__ == "__main__":
    A: Dict = make_matrix()
    print("\norigin:\n")
    print_matrix(A)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(A))
    A: List[Any] = make_matrix()
    print("\norigin:\n")
    print_matrix(A)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(A))
    A: List[str] = make_matrix()
    print("\norigin:\n")
    print_matrix(A)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(A))
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
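# Maps submodule name -> exported symbol names; entries are only added for
# backends that are importable, and _LazyModule defers the actual imports.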
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: List[str] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _UpperCAmelCase ( model_card_dir : Path , src_lang : str , tgt_lang : str ) -> Optional[int]:
"""simple docstring"""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
lowercase_ : Optional[Any] = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(f"Generating {path}" )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(lowercase_ )  # ``lowercase_`` still holds the README body assembled above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    _UpperCAmelCase(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 0 |
import random
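# Probabilistic primality testing: rabin_miller() runs five rounds of the
# Miller-Rabin witness test, and is_prime_low_num() first screens candidates
# against a table of small primes before falling back to it.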
def rabin_miller ( num : int ) -> bool:
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num ( num : int ) -> bool:
"""simple docstring"""
if num < 2:
return False
    low_primes = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime ( keysize : int = 1_0_2_4 ) -> int:
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 715 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
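# Training utilities for the distillation scripts: dump git metadata for
# reproducibility, initialize (multi-)GPU distributed state, and seed RNGs.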
def _UpperCAmelCase ( a : str ) -> List[Any]:
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
    }
    with open(os.path.join(a , 'git_log.json' ) , 'w' ) as f:
        json.dump(repo_infos , f , indent=4 )
def _UpperCAmelCase ( params ) -> Union[str, Any]:
"""simple docstring"""
if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'] )
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'] )
        params.global_rank = int(os.environ['RANK'] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _UpperCAmelCase ( args : Dict ) -> Optional[int]:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mbart'''] = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mbart_fast'''] = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mbart'''] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mbart'''] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mbart'''] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
'''simple docstring'''
import os
from distutils.util import strtobool
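# Helpers for reading typed configuration values from environment variables.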
def get_int_from_env ( env_keys , default ) -> Any:
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env ( key , default=False ) -> Optional[Any]:
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env ( key , default="no" ) -> str:
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return value
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union ( pred_label , label , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> str:
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 2_5_5
        label = label - 1
        label[label == 2_5_4] = 2_5_5
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union ( results , gt_seg_maps , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> Dict:
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou ( results , gt_seg_maps , num_labels , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> Optional[int]:
    """simple docstring"""
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou )
    metrics['mean_accuracy'] = np.nanmean(acc )
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute( self , predictions , references , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ) -> Tuple:
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
return iou_result
| 7 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
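# Records the name of every callback event it receives, so the tests below can
# compare the observed event stream against get_expected_events().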
class MyTestTrainerCallback ( TrainerCallback ):
"""simple docstring"""
    def __init__( self ) -> List[Any]:
        self.events = []
    def on_init_end( self , args , state , control , **kwargs ) -> int:
        self.events.append('on_init_end' )
    def on_train_begin( self , args , state , control , **kwargs ) -> List[str]:
        self.events.append('on_train_begin' )
    def on_train_end( self , args , state , control , **kwargs ) -> Dict:
        self.events.append('on_train_end' )
    def on_epoch_begin( self , args , state , control , **kwargs ) -> Tuple:
        self.events.append('on_epoch_begin' )
    def on_epoch_end( self , args , state , control , **kwargs ) -> List[Any]:
        self.events.append('on_epoch_end' )
    def on_step_begin( self , args , state , control , **kwargs ) -> Optional[int]:
        self.events.append('on_step_begin' )
    def on_step_end( self , args , state , control , **kwargs ) -> Any:
        self.events.append('on_step_end' )
    def on_evaluate( self , args , state , control , **kwargs ) -> List[Any]:
        self.events.append('on_evaluate' )
    def on_predict( self , args , state , control , **kwargs ) -> Tuple:
        self.events.append('on_predict' )
    def on_save( self , args , state , control , **kwargs ) -> str:
        self.events.append('on_save' )
    def on_log( self , args , state , control , **kwargs ) -> Optional[Any]:
        self.events.append('on_log' )
    def on_prediction_step( self , args , state , control , **kwargs ) -> List[Any]:
        self.events.append('on_prediction_step' )
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ) -> Union[str, Any]:
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self ) -> Any:
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ) -> Union[str, Any]:
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2 ) -> Optional[Any]:
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cb1, cb2 in zip(cbs1 , cbs2 ):
            if isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2 )
            elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2.__class__ )
            elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1.__class__ , cb2 )
            else:
                self.assertEqual(cb1 , cb2 )
    def get_expected_events( self , trainer ) -> Optional[Any]:
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
            for _ in range(train_dl_len ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
    def test_init_callback( self ) -> List[str]:
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_add_remove_callback( self ) -> List[Any]:
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(cb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1 )
        self.assertEqual(cb1 , cb2 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(cb1 )
        expected_callbacks.insert(0 , cb1 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_event_flow( self ) -> Optional[int]:
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore' , category=UserWarning )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'vit'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ) -> List[str]:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class __magic_name__ ( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1E-4
| 7 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Optional[Any] = logging.get_logger(__name__)
A: Any = {
'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __magic_name__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''trocr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }
    def __init__( self , vocab_size=5_0265 , d_model=1024 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=4096 , activation_function="gelu" , max_position_embeddings=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> Any:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=5_0257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ) -> Any:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Optional[int] = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowercase_ : Optional[Any] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowercase_ : List[Any] = tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowercase_ : Union[str, Any] = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
lowercase_ : int = output[output != -float('inf' )]
lowercase_ : Dict = tf.cast(
tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-1_2 )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@require_tf
class __magic_name__ ( unittest.TestCase, GenerationIntegrationTestsMixin ):
"""simple docstring"""
if is_tf_available():
SCREAMING_SNAKE_CASE_ : int = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
            'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeq2Seq,
            'AutoModelForSeq2SeqLM': TFAutoModelForSeq2SeqLM,
            'AutoModelForVision2Seq': TFAutoModelForVision2Seq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def lowerCamelCase__ ( self ) -> Optional[Any]:
# TF-only test: tf.saved_model export
lowercase_ : Tuple = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase_ : Union[str, Any] = 2
lowercase_ : List[Any] = 2
class __magic_name__ ( tf.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Dict:
super(UpperCAmelCase__ , self ).__init__()
lowercase_ : Any = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ),
) , jit_compile=UpperCAmelCase__ , )
            def serving( self , input_ids , attention_mask ) -> int:
lowercase_ : Dict = self.model.generate(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , )
return {"sequences": outputs["sequences"]}
lowercase_ : Optional[int] = [[2, 0], [102, 103]]
lowercase_ : Dict = [[1, 0], [1, 1]]
lowercase_ : Tuple = DummyModel(model=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'serving_default': dummy_model.serving} )
lowercase_ : Dict = tf.saved_model.load(UpperCAmelCase__ ).signatures['serving_default']
for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ):
lowercase_ : Optional[int] = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowercase_ : Tuple = serving_func(**UpperCAmelCase__ )['sequences']
lowercase_ : Dict = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCamelCase__ ( self ) -> Optional[int]:
# TF-only test: tf.saved_model export
lowercase_ : str = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase_ : List[Any] = 1
lowercase_ : Any = 2
class __magic_name__ ( tf.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[str]:
super(UpperCAmelCase__ , self ).__init__()
lowercase_ : Dict = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ),
) , jit_compile=UpperCAmelCase__ , )
            def serving( self , input_ids , attention_mask ) -> int:
lowercase_ : Optional[Any] = self.model.generate(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , )
return {"sequences": outputs["sequences"]}
lowercase_ : Tuple = [[2], [102, 103]]
lowercase_ : Dict = [[1], [1, 1]]
lowercase_ : List[str] = DummyModel(model=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'serving_default': dummy_model.serving} )
lowercase_ : List[str] = tf.saved_model.load(UpperCAmelCase__ ).signatures['serving_default']
for input_row in range(len(UpperCAmelCase__ ) ):
lowercase_ : Optional[int] = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
lowercase_ : Optional[Any] = serving_func(**UpperCAmelCase__ )['sequences']
lowercase_ : int = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
@require_tensorflow_text
def lowerCamelCase__ ( self ) -> int:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=UpperCAmelCase__ )
            class CompleteSentenceTransformer ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self ) -> Any:
super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , 'spiece.model' ) , 'rb' ).read() )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
                def call( self , inputs , *args , **kwargs ) -> List[str]:
lowercase_ : int = self.tokenizer.tokenize(UpperCAmelCase__ )
lowercase_ , lowercase_ : List[str] = text.pad_model_inputs(
UpperCAmelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
lowercase_ : Dict = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
return self.tokenizer.detokenize(UpperCAmelCase__ )
lowercase_ : Any = CompleteSentenceTransformer()
lowercase_ : Any = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
lowercase_ : str = complete_model(UpperCAmelCase__ )
lowercase_ : Tuple = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ )
keras_model.save(UpperCAmelCase__ )
def lowerCamelCase__ ( self ) -> Tuple:
# Has PT equivalent: this test relies on random sampling
lowercase_ : Any = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
lowercase_ : int = 14
lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase_ : Tuple = 'Hello, my dog is cute and'
lowercase_ : Tuple = tokenizer(UpperCAmelCase__ , return_tensors='tf' )
lowercase_ : Tuple = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase_ : Tuple = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
lowercase_ : List[Any] = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowercase_ : Optional[Any] = [638, 198]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
lowercase_ : Any = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCamelCase__ ( self ) -> List[str]:
# Has PT equivalent: ample use of framework-specific code
lowercase_ : str = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowercase_ : Optional[Any] = 'Hugging Face is a technology company based in New York and Paris.'
lowercase_ : List[str] = bart_tokenizer(UpperCAmelCase__ , return_tensors='tf' ).input_ids
lowercase_ : Tuple = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowercase_ : Optional[int] = bart_model.generate(UpperCAmelCase__ ).numpy()
class FakeBart(TFBartForConditionalGeneration):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase , _lowercase=None , **_lowercase ) -> List[str]:
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase_ : Tuple = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowercase_ : Optional[int] = bart_model.generate(UpperCAmelCase__ , foo='bar' ).numpy()
self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) )
class FakeEncoder(bart_model.model.encoder.__class__):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Any:
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase_ : Tuple = FakeEncoder(bart_model.config , bart_model.model.shared )
lowercase_ : List[Any] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowercase_ : List[str] = bart_model.generate(UpperCAmelCase__ ).numpy()
with self.assertRaises(UpperCAmelCase__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCAmelCase__ , foo='bar' )
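
# The tests above all exercise the same deployment pattern: wrap `generate()`
# in a `tf.Module` with a fixed input signature so it can be exported with
# `tf.saved_model.save` and served without Python-side generation code.
# A minimal sketch of calling such an export from a fresh process (the path
# below is a hypothetical placeholder, not taken from the tests):
#
#     loaded = tf.saved_model.load("/tmp/exported_tiny_gpt2")
#     serving_fn = loaded.signatures["serving_default"]
#     sequences = serving_fn(
#         input_ids=tf.constant([[2, 102, 103]], dtype=tf.int32),
#         attention_mask=tf.constant([[1, 1, 1]], dtype=tf.int32),
#     )["sequences"]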
| 720 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
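
# These tests pin down the contract of `is_safetensors_compatible`: every
# PyTorch `.bin` weight file needs a `.safetensors` counterpart, optionally
# carrying a `.fp16`-style variant infix. A minimal sketch of that rule,
# written purely for illustration (the real implementation lives in
# `diffusers.pipelines.pipeline_utils`):
def _sketch_is_safetensors_compatible(filenames, variant=None):
    infix = f".{variant}" if variant is not None else ""
    names = set(filenames)
    for name in filenames:
        if not name.endswith(".bin"):
            continue
        stem = name[: -len(".bin")]
        if infix and stem.endswith(infix):
            stem = stem[: -len(infix)]
        folder, _, fname = stem.rpartition("/")
        if fname == "pytorch_model":  # transformers weights pair with model.safetensors
            fname = "model"
        prefix = f"{folder}/" if folder else ""
        # accept either the variant or the plain safetensors counterpart
        if not {f"{prefix}{fname}{infix}.safetensors", f"{prefix}{fname}.safetensors"} & names:
            return False
    return True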
| 7 | 0 |
'''simple docstring'''
from manim import *
class __magic_name__ ( Scene ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Any = Rectangle(height=0.5 , width=0.5 )
lowercase_ : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowercase_ : Any = [mem.copy() for i in range(6 )]
lowercase_ : Optional[Any] = VGroup(*A__ ).arrange(A__ , buff=0 )
lowercase_ : Tuple = VGroup(*A__ ).arrange(A__ , buff=0 )
lowercase_ : Union[str, Any] = VGroup(A__ , A__ ).arrange(A__ , buff=0 )
lowercase_ : Dict = Text('CPU' , font_size=24 )
lowercase_ : str = Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A__ )
lowercase_ : int = [mem.copy() for i in range(1 )]
lowercase_ : Dict = VGroup(*A__ ).arrange(A__ , buff=0 )
lowercase_ : str = Text('GPU' , font_size=24 )
lowercase_ : Union[str, Any] = Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ )
gpu.align_to(A__ , A__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(A__ )
lowercase_ : Optional[Any] = [mem.copy() for i in range(6 )]
lowercase_ : int = VGroup(*A__ ).arrange(A__ , buff=0 )
lowercase_ : Any = Text('Model' , font_size=24 )
lowercase_ : Optional[int] = Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(A__ , run_time=1 ) , Create(A__ , run_time=1 ) , Create(A__ , run_time=1 ) , )
lowercase_ : List[str] = MarkupText(
f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=24 , )
lowercase_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase_ : List[Any] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(A__ , run_time=2.5 ) , Write(A__ ) , Write(A__ ) )
self.add(A__ )
lowercase_ : int = []
lowercase_ : str = []
lowercase_ : Union[str, Any] = []
for i, rect in enumerate(A__ ):
lowercase_ : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A__ , opacity=0.7 )
cpu_target.move_to(A__ )
cpu_target.generate_target()
lowercase_ : Tuple = 0.46 / 4
lowercase_ : Any = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=A__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=A__ , buff=0.0 )
cpu_targs.append(A__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(A__ ) )
second_animations.append(MoveToTarget(A__ , run_time=1.5 ) )
self.play(*A__ )
self.play(*A__ )
self.wait()
| 721 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(a , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : List[Any] = f.readlines()
lowercase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
lowercase_ : Dict = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
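
# For reference, the region of custom.js this script rewrites looks roughly
# like the following (illustrative, not copied from the actual file):
#
#     const stableVersion = "v4.30.0"
#     const versionMapping = {
#         "main": "main",
#         "v4.30.0": "v4.30.0",
#     }
#
# Running the script with `--version 4.31.0` bumps `stableVersion` and
# appends a `"v4.31.0": "v4.31.0"` entry just before the closing brace.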
| 7 | 0 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place by swapping randomly chosen index pairs."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
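
# Note: each pass swaps two uniformly random positions and the list is
# shuffled in place, so callers that need the original order must copy first.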
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 700 |
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of `source_data` into per-attribute columns."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise every attribute; weight 0 means lower is better, weight 1 higher."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores of every row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Append a weighted percentual-proximity score to every row of `source_data`."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
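
if __name__ == "__main__":
    # Small demo with made-up vehicle data: price and mileage are "lower is
    # better" (weight 0), registration year is "higher is better" (weight 1).
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))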
| 7 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert an original LXMERT TensorFlow checkpoint to a PyTorch state dict."""
    # Initialise PyTorch model from the configuration
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
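    # Example invocation (script filename and all paths are placeholders):
    #   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./lxmert_tf/model.ckpt \
    #       --config_file ./lxmert_tf/config.json \
    #       --pytorch_dump_path ./lxmert_pt/pytorch_model.bin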
| 701 |
'''simple docstring'''
def add(first: int, second: int) -> int:
    """
    Add two integers using only bitwise operators.

    >>> add(3, 5)
    8
    >>> add(-7, 2)
    -5
    """
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 7 | 0 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __magic_name__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> str:
super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
    def forward(self, x):
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
lowercase_ : List[Any] = [json.loads(lowercase__ ) for l in open(lowercase__ )]
lowercase_ : Optional[Any] = os.path.dirname(lowercase__ )
lowercase_ : Dict = tokenizer
lowercase_ : Any = labels
lowercase_ : Tuple = len(lowercase__ )
lowercase_ : Tuple = max_seq_length
lowercase_ : Optional[Any] = transforms
def __len__( self ) -> Optional[int]:
return len(self.data )
def __getitem__( self , _lowercase ) -> Optional[int]:
lowercase_ : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=lowercase__ ) )
lowercase_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
lowercase_ : int = sentence[: self.max_seq_length]
lowercase_ : Union[str, Any] = torch.zeros(self.n_classes )
lowercase_ : Any = 1
lowercase_ : List[str] = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
lowercase_ : Optional[Any] = self.transforms(lowercase__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Optional[Any] = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def _UpperCAmelCase ( a : Any ) -> List[str]:
"""simple docstring"""
lowercase_ : Optional[Any] = [len(row['sentence'] ) for row in batch]
lowercase_ : List[str] = len(lowerCAmelCase_ ), max(lowerCAmelCase_ )
lowercase_ : Dict = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ , dtype=torch.long )
lowercase_ : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ):
lowercase_ : Dict = input_row['''sentence''']
lowercase_ : str = 1
lowercase_ : Optional[int] = torch.stack([row['image'] for row in batch] )
lowercase_ : str = torch.stack([row['label'] for row in batch] )
lowercase_ : Dict = torch.stack([row['image_start_token'] for row in batch] )
lowercase_ : Union[str, Any] = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
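
# Sketch of how these pieces are wired together in the original MM-IMDB
# example this file mirrors (names and paths are illustrative; `collate_fn`,
# `get_mmimdb_labels` and `get_image_transforms` correspond to the three
# module-level helpers defined above):
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = JsonlDataset("mmimdb/train.jsonl", tokenizer,
#                            get_image_transforms(), get_mmimdb_labels(),
#                            max_seq_length=512)
#     loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)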
| 702 |
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue backed by a fixed-size list."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it (False when empty)."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
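
# Quick demonstration of the FIFO behaviour:
#
#     queue = CircularQueue(3)
#     queue.enqueue("a").enqueue("b")
#     queue.first()    # "a"
#     queue.dequeue()  # "a"
#     len(queue)       # 1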
| 7 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"
    def __init__(self, image_processor, feature_extractor) -> Tuple:
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ) -> Optional[Any]:
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.' )
lowercase_ : Optional[Any] = None
if images is not None:
lowercase_ : str = self.image_processor(lowercase__ , mask_pixel=lowercase__ , *lowercase__ , **lowercase__ )
if images_mixed is not None:
lowercase_ : Union[str, Any] = self.image_processor(lowercase__ , is_mixed=lowercase__ , *lowercase__ , **lowercase__ )
if audio is not None:
lowercase_ : Tuple = self.feature_extractor(
lowercase__ , *lowercase__ , sampling_rate=lowercase__ , mask_audio=lowercase__ , **lowercase__ )
lowercase_ : int = {}
if audio is not None:
output_dict.update(lowercase__ )
if images is not None:
output_dict.update(lowercase__ )
if images_mixed_dict is not None:
output_dict.update(lowercase__ )
return output_dict
@property
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : List[Any] = self.image_processor.model_input_names
lowercase_ : Union[str, Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
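
# A usage sketch for the processor above (inputs are illustrative numpy
# arrays; `sampling_rate` must match the feature extractor's expectation):
#
#     processor = TvltProcessor(image_processor, feature_extractor)
#     batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#     # `batch` merges pixel values and audio features into one dict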
| 703 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
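
# Example: downscale_height_and_width(768, 768, scale_factor=8) == (96, 96).
# The requested image size is mapped to latent-grid dimensions (size divided
# by scale_factor), rounded up so the result stays a multiple of scale_factor.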
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

        init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""Returns the device on which the pipeline's models will be executed."""
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 7 | 0 |
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    """Print the articulation points of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
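# For the adjacency list above, this prints the articulation points 2, 3
# and 5 (removing any one of them disconnects the graph).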
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A: int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def _UpperCAmelCase ( a : List[Any] , a : str , a : Dict , a : int , a : List[Any]=0 ) -> int:
"""simple docstring"""
os.makedirs(a , exist_ok=a )
with FSDP.state_dict_type(
a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase_ : Optional[int] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase_ : List[Any] = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
lowercase_ : Dict = os.path.join(a , a )
if accelerator.process_index == 0:
logger.info(f"Saving model to {output_model_file}" )
torch.save(a , a )
logger.info(f"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase_ : Any = (
f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
lowercase_ : str = os.path.join(a , a )
logger.info(f"Saving model to {output_model_file}" )
torch.save(a , a )
logger.info(f"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase_ : Union[str, Any] = os.path.join(a , f"{MODEL_NAME}_{model_index}" )
os.makedirs(a , exist_ok=a )
logger.info(f"Saving model to {ckpt_dir}" )
lowercase_ : Optional[Any] = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=a , storage_writer=dist_cp.FileSystemWriter(a ) , planner=DefaultSavePlanner() , )
logger.info(f"Model saved to {ckpt_dir}" )
def _UpperCAmelCase ( a : Optional[int] , a : List[str] , a : Union[str, Any] , a : Dict , a : List[Any]=0 ) -> List[Any]:
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(a ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
lowercase_ : str = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
lowercase_ : List[Any] = os.path.join(a , a )
logger.info(f"Loading model from {input_model_file}" )
lowercase_ : Any = torch.load(a )
logger.info(f"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase_ : Any = (
f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
lowercase_ : int = os.path.join(a , a )
logger.info(f"Loading model from {input_model_file}" )
lowercase_ : Any = torch.load(a )
logger.info(f"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase_ : List[str] = (
os.path.join(a , f"{MODEL_NAME}_{model_index}" )
if f"{MODEL_NAME}" not in input_dir
else input_dir
)
logger.info(f"Loading model from {ckpt_dir}" )
lowercase_ : str = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=a , storage_reader=dist_cp.FileSystemReader(a ) , planner=DefaultLoadPlanner() , )
lowercase_ : Tuple = state_dict['model']
logger.info(f"Model loaded from {ckpt_dir}" )
model.load_state_dict(a )
def _UpperCAmelCase ( a : str , a : Any , a : List[Any] , a : Dict , a : int , a : Optional[Any]=0 ) -> Optional[Any]:
"""simple docstring"""
os.makedirs(a , exist_ok=a )
with FSDP.state_dict_type(
a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase_ : Any = FSDP.optim_state_dict(a , a )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowercase_ : List[Any] = (
f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
lowercase_ : Optional[Any] = os.path.join(a , a )
logger.info(f"Saving Optimizer state to {output_optimizer_file}" )
torch.save(a , a )
logger.info(f"Optimizer state saved in {output_optimizer_file}" )
else:
lowercase_ : str = os.path.join(a , f"{OPTIMIZER_NAME}_{optimizer_index}" )
os.makedirs(a , exist_ok=a )
logger.info(f"Saving Optimizer state to {ckpt_dir}" )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(a ) , planner=DefaultSavePlanner() , )
logger.info(f"Optimizer state saved in {ckpt_dir}" )
def _UpperCAmelCase ( a : Union[str, Any] , a : Dict , a : Union[str, Any] , a : Any , a : Tuple , a : int=0 ) -> Optional[int]:
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase_ : List[str] = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase_ : int = (
f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
lowercase_ : Union[str, Any] = os.path.join(a , a )
logger.info(f"Loading Optimizer state from {input_optimizer_file}" )
lowercase_ : Tuple = torch.load(a )
logger.info(f"Optimizer state loaded from {input_optimizer_file}" )
else:
lowercase_ : int = (
os.path.join(a , f"{OPTIMIZER_NAME}_{optimizer_index}" )
if f"{OPTIMIZER_NAME}" not in input_dir
else input_dir
)
logger.info(f"Loading Optimizer from {ckpt_dir}" )
lowercase_ : List[Any] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(a ) , )
lowercase_ : Optional[int] = optim_state['optimizer']
logger.info(f"Optimizer loaded from {ckpt_dir}" )
lowercase_ : Tuple = FSDP.optim_state_dict_to_load(a , a , a )
optimizer.load_state_dict(a )
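
# These helpers back `Accelerator.save_state` / `Accelerator.load_state` when
# FSDP is enabled; upstream they are named save_fsdp_model / load_fsdp_model
# and save_fsdp_optimizer / load_fsdp_optimizer. A sketch of the checkpoint
# round trip under those names (directory is illustrative):
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#     ...
#     load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")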
| 705 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def get_tokenizer( self , _lowercase ) -> Tuple:
return FSMTTokenizer.from_pretrained(_lowercase )
    def get_model( self , _lowercase ) -> Optional[int]:
lowercase_ : str = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(_lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
lowercase_ : Optional[Any] = f"facebook/wmt19-{pair}"
lowercase_ : str = self.get_tokenizer(_lowercase )
lowercase_ : Any = self.get_model(_lowercase )
lowercase_ : Any = bleu_data[pair]['src']
lowercase_ : Any = bleu_data[pair]['tgt']
lowercase_ : Dict = tokenizer(_lowercase , return_tensors='pt' , truncation=_lowercase , padding='longest' ).to(_lowercase )
lowercase_ : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowercase_ : Any = tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
lowercase_ : Union[str, Any] = calculate_bleu(_lowercase , _lowercase )
print(_lowercase )
self.assertGreaterEqual(scores['bleu'] , _lowercase )
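
# `calculate_bleu` is imported from the examples' `utils.py`; it is a thin
# wrapper around sacrebleu, roughly (assuming sacrebleu is installed):
#
#     from sacrebleu import corpus_bleu
#
#     def calculate_bleu(output_lns, refs_lns):
#         return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}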
| 7 | 0 |
'''simple docstring'''
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    """
    Return the zero-based index of the rightmost set bit of `number`.

    >>> get_index_of_rightmost_set_bit(36)
    2
    """
    if number < 0:
        raise ValueError('Input value must be a positive integer')
    elif not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
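    # Why it works: `number & -number` keeps only the lowest set bit (a
    # two's-complement identity), so log2 of that power of two is exactly
    # the bit's index, e.g. 36 == 0b100100 -> index 2.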
| 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: int = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A: Optional[int] = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Any = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: str = ["LayoutLMv2FeatureExtractor"]
A: Dict = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"

    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
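    # The "|" separators make every palindrome odd-length in the transformed
    # string, so a single centre-expansion rule covers both odd- and
    # even-length palindromes of the original input.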
| 7 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS, R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ', )
class FillMaskPipeline(Pipeline):
"""simple docstring"""
    def get_masked_index( self , input_ids ) -> np.ndarray:
if self.framework == "tf":
lowercase_ : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowercase_ : Optional[int] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCAmelCase )
else:
raise ValueError('Unsupported framework' )
return masked_index
    def _ensure_exactly_one_mask_token( self , input_ids ) -> np.ndarray:
lowercase_ : List[str] = self.get_masked_index(_lowerCAmelCase )
lowercase_ : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
    def ensure_exactly_one_mask_token( self , model_inputs ) -> str:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowerCAmelCase )
    def preprocess( self , inputs , return_tensors=None , **preprocess_parameters ) -> Dict[str, GenericTensor]:
if return_tensors is None:
lowercase_ : List[str] = self.framework
lowercase_ : List[Any] = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.ensure_exactly_one_mask_token(_lowerCAmelCase )
return model_inputs
    def _forward( self , model_inputs ) -> Any:
lowercase_ : Dict = self.model(**_lowerCAmelCase )
lowercase_ : int = model_inputs['input_ids']
return model_outputs
    def postprocess( self , model_outputs , top_k=5 , target_ids=None ) -> Tuple:
if target_ids is not None and target_ids.shape[0] < top_k:
lowercase_ : Any = target_ids.shape[0]
lowercase_ : List[str] = model_outputs['input_ids'][0]
lowercase_ : Tuple = model_outputs['logits']
if self.framework == "tf":
lowercase_ : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowercase_ : List[Any] = outputs.numpy()
lowercase_ : Optional[Any] = outputs[0, masked_index, :]
lowercase_ : Union[str, Any] = stable_softmax(_lowerCAmelCase , axis=-1 )
if target_ids is not None:
lowercase_ : Dict = tf.gather_nd(tf.squeeze(_lowerCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
lowercase_ : Optional[int] = tf.expand_dims(_lowerCAmelCase , 0 )
lowercase_ : List[Any] = tf.math.top_k(_lowerCAmelCase , k=_lowerCAmelCase )
lowercase_ , lowercase_ : str = topk.values.numpy(), topk.indices.numpy()
else:
lowercase_ : Union[str, Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowercase_ : Optional[Any] = outputs[0, masked_index, :]
lowercase_ : Dict = logits.softmax(dim=-1 )
if target_ids is not None:
lowercase_ : str = probs[..., target_ids]
lowercase_ , lowercase_ : Optional[int] = probs.topk(_lowerCAmelCase )
lowercase_ : Tuple = []
lowercase_ : Tuple = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowercase_ : int = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowercase_ : Union[str, Any] = input_ids.numpy().copy()
if target_ids is not None:
lowercase_ : Tuple = target_ids[p].tolist()
lowercase_ : Optional[int] = p
# Filter padding out:
lowercase_ : str = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowercase_ : Tuple = self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
lowercase_ : List[Any] = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(_lowerCAmelCase )
result.append(_lowerCAmelCase )
if single_mask:
return result[0]
return result
    def get_target_ids( self , targets , top_k=None ) -> Union[str, Any]:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase_ : Tuple = [targets]
try:
lowercase_ : Optional[Any] = self.tokenizer.get_vocab()
except Exception:
lowercase_ : List[Any] = {}
lowercase_ : int = []
for target in targets:
lowercase_ : List[str] = vocab.get(_lowerCAmelCase , _lowerCAmelCase )
if id_ is None:
lowercase_ : str = self.tokenizer(
_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , max_length=1 , truncation=_lowerCAmelCase , )['input_ids']
if len(_lowerCAmelCase ) == 0:
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
'We cannot replace it with anything meaningful, ignoring it' )
continue
lowercase_ : List[Any] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowercase_ : List[str] = list(set(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) == 0:
raise ValueError('At least one target must be provided when passed.' )
lowercase_ : Optional[int] = np.array(_lowerCAmelCase )
return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ) -> Tuple:
lowercase_ : Optional[int] = {}
if targets is not None:
lowercase_ : str = self.get_target_ids(_lowerCAmelCase , _lowerCAmelCase )
lowercase_ : Any = target_ids
if top_k is not None:
lowercase_ : Any = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self , _lowercase , *_lowercase , **_lowercase ) -> Any:
lowercase_ : List[Any] = super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) == 1:
return outputs[0]
return outputs
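
# Typical use of this pipeline goes through the high-level factory (the model
# name is just an example):
#
#     from transformers import pipeline
#
#     fill_mask = pipeline("fill-mask", model="distilroberta-base")
#     fill_mask("The capital of France is <mask>.", top_k=3)
#     # -> list of {"score", "token", "token_str", "sequence"} dicts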
| 708 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""simple docstring"""
    root_marker = ''
    protocol = 'hf-legacy'  # "hf://" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
    def _get_dirs( self ) -> Optional[Any]:
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple:
self._get_dirs()
lowercase_ : str = self._strip_protocol(_lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
self._get_dirs()
lowercase_ : List[str] = PurePosixPath(path.strip('/' ) )
lowercase_ : List[str] = {}
for p, f in self.dir_cache.items():
lowercase_ : Tuple = PurePosixPath(p.strip('/' ) )
lowercase_ : Optional[int] = p.parent
if root == path:
lowercase_ : List[str] = f
lowercase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
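
# Minimal usage sketch (assumptions: network access and a public dataset repo;
# the repo id and file name below are illustrative):
#
#     from huggingface_hub import HfApi
#
#     repo_info = HfApi().dataset_info("squad")
#     fs = HfFileSystem(repo_info=repo_info)
#     print(fs.ls(""))                       # top-level files and directories
#     with fs.open("dataset_infos.json") as f:
#         data = f.read()                    # streamed over HTTP via fsspec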
| 7 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ) -> None:
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self) -> dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self) -> None:
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self) -> dict:
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
    def test_image_processor_from_dict_with_kwargs(self) -> None:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
    def test_batch_feature(self) -> None:
        pass

    def test_call_pil(self) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
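
# To run only this module with pytest (sketch; the file path is illustrative
# and depends on the repository layout):
#
#     python -m pytest tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py -q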
| 709 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
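
    # Example invocation (sketch; the script and output folder names are
    # illustrative):
    #
    #   python convert_vilt_checkpoint.py \
    #       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
    #       --pytorch_dump_folder_path ./vilt-b32-mlm-itm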
| 7 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : List[Any] = '''</s>'''
lowercase_ : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(UpperCamelCase__ ) , 1103 )
def lowerCamelCase__ ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase_ : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase_ : List[Any] = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowercase_ : str = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
lowercase_ : List[str] = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : List[str] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase_ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowercase_ : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowercase_ : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Any = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowercase_ : List[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowercase_ : Union[str, Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowercase_ : List[str] = tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Union[str, Any] = ['''This is going to be way too long.''' * 150, '''short example''']
lowercase_ : Tuple = ['''not super long but more than 5 tokens''', '''tiny''']
lowercase_ : Tuple = self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
lowercase_ : Any = self._large_tokenizer(
text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase__ ( self ) -> Optional[Any]:
# fmt: off
lowercase_ : Optional[Any] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase_ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowercase_ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
lowercase_ : str = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@require_torch
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = ['''This is going to be way too long.''' * 1000, '''short example''']
lowercase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowercase_ : List[Any] = self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
lowercase_ : Dict = self._large_tokenizer(
text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask.
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Tuple = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowercase_ : List[str] = self._large_tokenizer(UpperCamelCase__ ).input_ids
self.assertListEqual(
UpperCamelCase__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 710 |
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sorts a list in place by alternating backward and forward bubble passes.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 7 | 0 |
'''simple docstring'''
class EditDistance:
    """
    Levenshtein edit distance between two words, solved both top-down
    (memoized recursion) and bottom-up (tabulation).
    """

    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
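
# Behavior sketch: with any of the listed backends missing, touching this
# placeholder raises an informative ImportError from `requires_backends`
# instead of an opaque AttributeError at import time:
#
#     pipe = SpectrogramDiffusionPipeline()  # -> ImportError naming
#                                            #    transformers/torch/note_seq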
| 7 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 712 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity: the Jaro score of the two strings, boosted by the
    length of their common prefix (capped at 4 characters).
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
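    # Worked example: "martha" vs "marhta" match on all 6 characters with one
    # transposition (t <-> h), so jaro = (6/6 + 6/6 + 5/6) / 3 ~ 0.9444; the
    # shared prefix "mar" (length 3) lifts the final score to ~ 0.9611.
    print(f"{jaro_winkler('martha', 'marhta') = :.4f}")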
| 7 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class ConditionalDetrConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ) -> None:
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
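
# Minimal usage sketch: build a config with the defaults above, round-trip it
# through to_dict(), and inspect the ONNX input spec (values follow from the
# defaults defined in this file):
#
#     config = ConditionalDetrConfig()
#     assert config.hidden_size == 256 and config.num_queries == 300
#     assert ConditionalDetrConfig.from_dict(config.to_dict()).d_model == 256
#
#     onnx_config = ConditionalDetrOnnxConfig(config)
#     print(list(onnx_config.inputs.keys()))  # ['pixel_values', 'pixel_mask']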
| 713 |
'''simple docstring'''
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """
    Build a square matrix filled with 1..row_size**2, row by row.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """
    Rotate the matrix by 90 degrees counterclockwise.
    """
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """
    Rotate the matrix by 180 degrees.
    """
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """
    Rotate the matrix by 270 degrees counterclockwise (90 degrees clockwise).
    """
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
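    # Sanity check: two successive 90-degree rotations equal a single
    # 180-degree rotation, and a 90-degree followed by a 270-degree rotation
    # restores the original matrix.
    assert rotate_90(rotate_90(make_matrix())) == rotate_180(make_matrix())
    assert rotate_270(rotate_90(make_matrix())) == make_matrix()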
| 7 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A: Dict = "▁"
A: Any = {"vocab_file": "spiece.model"}
A: List[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
A: Tuple = {
"google/pegasus-xsum": 5_1_2,
}
A: Optional[int] = logging.get_logger(__name__)
class __magic_name__ ( UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[Any] = ['input_ids', 'attention_mask']
def __init__( self , _lowercase , _lowercase="<pad>" , _lowercase="</s>" , _lowercase="<unk>" , _lowercase="<mask_2>" , _lowercase="<mask_1>" , _lowercase=None , _lowercase=103 , _lowercase = None , **_lowercase , ) -> None:
lowercase_ : Any = offset
if additional_special_tokens is not None:
if not isinstance(__A , __A ):
raise TypeError(
f"additional_special_tokens should be of type {type(__A )}, but is"
f" {type(__A )}" )
lowercase_ : List[str] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(__A ) , self.offset - 1 )
]
if len(set(__A ) ) != len(__A ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
lowercase_ : Optional[int] = additional_special_tokens_extended
else:
lowercase_ : Optional[int] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
lowercase_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__A , unk_token=__A , mask_token=__A , pad_token=__A , mask_token_sent=__A , offset=__A , additional_special_tokens=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
lowercase_ : Optional[Any] = mask_token_sent
lowercase_ : str = vocab_file
lowercase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# add special tokens to encoder dict
lowercase_ : List[Any] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowercase_ : Any = {v: k for k, v in self.encoder.items()}
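        # Net effect of the setup above: id 0 is pad and id 1 is eos; when a
        # sentence-level mask token is configured, ids 2 and 3 are
        # mask_token_sent and mask_token, the remaining additional special
        # tokens fill ids 4..offset+1, and plain sentencepiece ids are shifted
        # up by `offset` (see the id/token conversion methods below).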
@property
def lowerCamelCase__ ( self ) -> int:
return len(self.sp_model ) + self.offset
def lowerCamelCase__ ( self ) -> Dict[str, int]:
lowercase_ : Tuple = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
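    # Usage sketch (assumption: this mangled class corresponds to
    # transformers' PegasusTokenizer, per the vocab map above; the checkpoint
    # name is illustrative):
    #   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    #   tok("Hello world").input_ids  # sentencepiece ids shifted by `offset`, ending in eos id 1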
def __getstate__( self ) -> Union[str, Any]:
lowercase_ : str = self.__dict__.copy()
lowercase_ : int = None
return state
def __setstate__( self , _lowercase ) -> str:
lowercase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase_ : Any = {}
lowercase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self , _lowercase ) -> List[str]:
return self.sp_model.encode(__A , out_type=__A )
def lowerCamelCase__ ( self , _lowercase ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowercase_ : str = self.sp_model.piece_to_id(__A )
return sp_id + self.offset
def lowerCamelCase__ ( self , _lowercase ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowercase_ : str = self.sp_model.IdToPiece(index - self.offset )
return token
def lowerCamelCase__ ( self , _lowercase ) -> Optional[Any]:
lowercase_ : List[str] = []
lowercase_ : str = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__A ) + token
lowercase_ : int = []
else:
current_sub_tokens.append(__A )
out_string += self.sp_model.decode(__A )
return out_string.strip()
def lowerCamelCase__ ( self , _lowercase=False ) -> Any:
return 1
def lowerCamelCase__ ( self , _lowercase ) -> List[Any]:
lowercase_ : Optional[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(__A )
elif token_ids_a is None:
return self._special_token_mask(__A ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase__ ( self , _lowercase , _lowercase=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase_ : Tuple = os.path.join(
__A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , 'wb' ) as fi:
lowercase_ : Any = self.sp_model.serialized_model_proto()
fi.write(__A )
        return (out_vocab_file,)
| 714 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang ) -> None:
"""simple docstring"""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(f"Generating {path}" )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(lowercase_ )  # `lowercase_` holds the model card text built above
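# For example, for model_name "wmt19-ru-en" the loop below generates
# model_cards/facebook/wmt19-ru-en/README.md.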
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A: Optional[Any] = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class __magic_name__ ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase=None , _lowercase=1 ) -> List[Any]:
lowercase_ : List[Any] = tokenizer
lowercase_ : int = dataset
lowercase_ : str = len(__SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
lowercase_ : int = n_copies
def __iter__( self ) -> str:
lowercase_ : List[Any] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowercase_ : str = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __magic_name__ ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> Tuple:
lowercase_ : List[str] = start_length
lowercase_ : List[Any] = eof_strings
lowercase_ : str = tokenizer
def __call__( self , _lowercase , _lowercase , **_lowercase ) -> str:
lowercase_ : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowercase_ : List[str] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Optional[Any] = re.split('(%s)' % '|'.join(_UpperCAmelCase ) , _UpperCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def _UpperCAmelCase ( a : List[str] , a : Optional[int] , a : int , a : Union[str, Any] , a : Tuple , a : List[Any]=2_0 , **a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : Optional[int] = defaultdict(_UpperCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_UpperCAmelCase ) ):
with torch.no_grad():
lowercase_ : int = batch['ids'].shape[-1]
lowercase_ : Tuple = accelerator.unwrap_model(_UpperCAmelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_UpperCAmelCase , **_UpperCAmelCase )
# each task is generated batch_size times
lowercase_ : Union[str, Any] = batch['task_id'].repeat(_UpperCAmelCase )
lowercase_ : Any = accelerator.pad_across_processes(
_UpperCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowercase_ , lowercase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
lowercase_ : Any = generated_tokens.cpu().numpy()
lowercase_ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_UpperCAmelCase , _UpperCAmelCase ):
gen_token_dict[task].append(_UpperCAmelCase )
lowercase_ : Optional[Any] = [[] for _ in range(_UpperCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowercase_ : Dict = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
code_gens[task].append(remove_last_block(_UpperCAmelCase ) )
return code_gens
def _UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
# Setup configuration
lowercase_ : Tuple = HfArgumentParser(_UpperCAmelCase )
lowercase_ : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowercase_ : Tuple = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowercase_ : Tuple = 'false'
if args.num_workers is None:
lowercase_ : Optional[int] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowercase_ : Tuple = Accelerator()
set_seed(args.seed , device_specific=_UpperCAmelCase )
# Load model and tokenizer
lowercase_ : str = AutoTokenizer.from_pretrained(args.model_ckpt )
lowercase_ : List[Any] = tokenizer.eos_token
lowercase_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowercase_ : int = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _UpperCAmelCase , _UpperCAmelCase )] ),
}
# Load evaluation dataset and metric
lowercase_ : Tuple = load_dataset('openai_humaneval' )
lowercase_ : str = load_metric('code_eval' )
lowercase_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowercase_ : Union[str, Any] = args.n_samples // args.batch_size
lowercase_ : List[Any] = TokenizedDataset(_UpperCAmelCase , human_eval['test'] , n_copies=_UpperCAmelCase , n_tasks=_UpperCAmelCase )
    # note: args.batch_size is the number of sequences generated per prompt (num_return_sequences); the dataloader batch size is 1
lowercase_ : str = DataLoader(_UpperCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowercase_ : Union[str, Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowercase_ , lowercase_ : str = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase )
lowercase_ : List[Any] = complete_code(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , n_tasks=_UpperCAmelCase , batch_size=args.batch_size , **_UpperCAmelCase , )
if accelerator.is_main_process:
lowercase_ : Dict = []
for task in tqdm(range(_UpperCAmelCase ) ):
lowercase_ : int = human_eval['test'][task]['test']
lowercase_ : Optional[Any] = f"check({human_eval['test'][task]['entry_point']})"
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowercase_ , lowercase_ : Dict = code_eval_metric.compute(
references=_UpperCAmelCase , predictions=_UpperCAmelCase , num_workers=args.num_workers )
print(f"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
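    # Example invocation (argument names follow HumanEvalArguments; the
    # checkpoint and values are illustrative):
    #   HF_ALLOW_CODE_EVAL=1 python human_eval.py --model_ckpt codeparrot/codeparrot --n_samples 200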
| 715 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
def _UpperCAmelCase ( a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[str] = git.Repo(search_parent_directories=a )
lowercase_ : Union[str, Any] = {
'repo_id': str(a ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(a , 'git_log.json' ) , 'w' ) as f:
json.dump(a , a , indent=4 )
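# Side effect of the helper above: writes <folder>/git_log.json with the repo
# path, current commit sha, and active branch, for experiment provenance.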
def _UpperCAmelCase ( a : str ) -> Union[str, Any]:
"""simple docstring"""
if params.n_gpu <= 0:
lowercase_ : int = 0
lowercase_ : Union[str, Any] = -1
lowercase_ : List[str] = True
lowercase_ : Optional[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase_ : Dict = int(os.environ['WORLD_SIZE'] )
lowercase_ : Union[str, Any] = int(os.environ['N_GPU_NODE'] )
lowercase_ : Optional[int] = int(os.environ['RANK'] )
# number of nodes / node ID
lowercase_ : int = params.world_size // params.n_gpu_per_node
lowercase_ : str = params.global_rank // params.n_gpu_per_node
lowercase_ : Dict = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase_ : str = 1
lowercase_ : Dict = 0
lowercase_ : Tuple = 0
lowercase_ : List[Any] = 0
lowercase_ : int = 1
lowercase_ : Tuple = 1
lowercase_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase_ : List[str] = params.node_id == 0 and params.local_rank == 0
lowercase_ : Optional[Any] = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _UpperCAmelCase ( a : Dict ) -> Optional[int]:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 7 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __magic_name__ ( unittest.TestCase, UpperCamelCase_ ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : List[str] = load_tool('text-to-speech' )
self.tool.setup()
def lowerCamelCase__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase_ : Dict = self.tool('hey' )
lowercase_ : List[Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
def lowerCamelCase__ ( self ) -> List[Any]:
torch.manual_seed(0 )
lowercase_ : Any = self.tool('hey' )
lowercase_ : Any = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 716 |
'''simple docstring'''
import os
from distutils.util import strtobool
def _UpperCAmelCase ( a : Any , a : int ) -> Any:
"""simple docstring"""
for e in env_keys:
lowercase_ : Optional[Any] = int(os.environ.get(a , -1 ) )
if val >= 0:
return val
return default
def _UpperCAmelCase ( a : List[Any] , a : Dict=False ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Optional[int] = os.environ.get(a , str(a ) )
return strtobool(a ) == 1 # As its name indicates `strtobool` actually returns an int...
def _UpperCAmelCase ( a : List[Any] , a : Dict="no" ) -> str:
"""simple docstring"""
lowercase_ : List[Any] = os.environ.get(a , str(a ) )
return value
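# The three helpers above (their original names are mangled in this dump)
# read, in order: the first non-negative int found among several env keys
# (falling back to a default), a boolean flag parsed via strtobool, and a raw
# string value with a default of "no".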
| 7 | 0 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
A: Optional[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( ) -> str:
"""simple docstring"""
lowercase_ : Union[str, Any] = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
try:
        # Parse it and check that the field "partitions" is included; it is required for model parallelism.
lowercase_ : int = json.loads(_lowercase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowercase_ : Optional[Any] = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowercase_ : Dict = json.loads(_lowercase )
if not mpi_options.get('sagemaker_mpi_enabled' , _lowercase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('smdistributed' ) is not None
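# For reference, the launcher-provided values this check expects look like
# (illustrative, not exhaustive):
#   SM_HP_MP_PARAMETERS='{"partitions": 2}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'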
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = field(
default='', metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'}, )
def lowerCamelCase__ ( self ) -> Tuple:
super().__post_init__()
warnings.warn(
'`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
'`TrainingArguments` instead.' , __A , )
@cached_property
def lowerCamelCase__ ( self ) -> "torch.device":
logger.info('PyTorch: setting up devices' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`' )
if self.no_cuda:
lowercase_ : List[Any] = torch.device('cpu' )
lowercase_ : str = 0
elif is_sagemaker_model_parallel_available():
lowercase_ : Tuple = smp.local_rank()
lowercase_ : List[Any] = torch.device('cuda' , __A )
lowercase_ : Union[str, Any] = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
lowercase_ : int = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
lowercase_ : Tuple = torch.device('cuda' , self.local_rank )
lowercase_ : Optional[int] = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowercase_ : Union[str, Any] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowercase_ : Dict = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
lowercase_ : Dict = torch.device('cuda' , self.local_rank )
lowercase_ : int = 1
if device.type == "cuda":
torch.cuda.set_device(__A )
return device
@property
def lowerCamelCase__ ( self ) -> Tuple:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def lowerCamelCase__ ( self ) -> Any:
return not is_sagemaker_model_parallel_available()
@property
def lowerCamelCase__ ( self ) -> Optional[Any]:
return False
| 717 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : Dict , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> str:
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
lowercase_ : Union[str, Any] = new_id
# turn into Numpy arrays
lowercase_ : List[Any] = np.array(a )
lowercase_ : Optional[Any] = np.array(a )
if reduce_labels:
lowercase_ : Any = 2_5_5
lowercase_ : Dict = label - 1
lowercase_ : List[Any] = 2_5_5
lowercase_ : Any = label != ignore_index
lowercase_ : List[Any] = np.not_equal(a , a )
lowercase_ : Optional[int] = pred_label[mask]
lowercase_ : Union[str, Any] = np.array(a )[mask]
lowercase_ : Optional[int] = pred_label[pred_label == label]
lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Dict = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Optional[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
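# Worked micro-example of the intersection/union computation above:
# pred_label=[[0, 1]], label=[[0, 0]], num_labels=2, ignore_index=255 gives
# area_intersect=[1, 0], area_union=[2, 1], area_pred_label=[1, 1],
# area_label=[2, 0].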
def _UpperCAmelCase ( a : int , a : Optional[Any] , a : Optional[int] , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Dict:
"""simple docstring"""
lowercase_ : Dict = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(a , a ):
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = intersect_and_union(
a , a , a , a , a , a )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _UpperCAmelCase ( a : Optional[Any] , a : List[str] , a : Optional[Any] , a : bool , a : Optional[int] = None , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Optional[int]:
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = total_intersect_and_union(
a , a , a , a , a , a )
# compute metrics
lowercase_ : str = {}
lowercase_ : str = total_area_intersect.sum() / total_area_label.sum()
lowercase_ : Optional[Any] = total_area_intersect / total_area_union
lowercase_ : List[Any] = total_area_intersect / total_area_label
lowercase_ : Any = np.nanmean(a )
lowercase_ : Optional[Any] = np.nanmean(a )
lowercase_ : int = all_acc
lowercase_ : Union[str, Any] = iou
lowercase_ : Optional[Any] = acc
if nan_to_num is not None:
lowercase_ : Optional[int] = {metric: np.nan_to_num(a , nan=a ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , ) -> Tuple:
lowercase_ : Optional[int] = mean_iou(
results=_lowercase , gt_seg_maps=_lowercase , num_labels=_lowercase , ignore_index=_lowercase , nan_to_num=_lowercase , label_map=_lowercase , reduce_labels=_lowercase , )
return iou_result
| 7 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit'
def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=16 , **_lowercase , ) -> List[str]:
super().__init__(**_lowercase )
lowercase_ : Optional[int] = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Any = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Any = image_size
lowercase_ : Tuple = patch_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : str = qkv_bias
lowercase_ : List[str] = encoder_stride
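    # Usage sketch (assumption: this corresponds to transformers' ViTConfig,
    # per model_type "vit" above; values shown are the signature defaults):
    #   config = ViTConfig(image_size=224, patch_size=16)
    #   config.hidden_size  # 768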
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' )
@property
def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__ ( self ) -> float:
return 1E-4
| 7 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=128 , _lowercase=32 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> Union[str, Any]:
lowercase_ : List[Any] = parent
lowercase_ : int = batch_size
lowercase_ : Union[str, Any] = seq_length
lowercase_ : List[str] = is_training
lowercase_ : Optional[Any] = use_input_mask
lowercase_ : str = use_token_type_ids
lowercase_ : Union[str, Any] = use_labels
lowercase_ : Optional[int] = vocab_size
lowercase_ : Optional[int] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Optional[Any] = num_attention_heads
lowercase_ : List[str] = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : Dict = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : Any = max_position_embeddings
lowercase_ : List[str] = type_vocab_size
lowercase_ : Dict = type_sequence_label_size
lowercase_ : Optional[int] = initializer_range
lowercase_ : str = num_labels
lowercase_ : Any = num_choices
lowercase_ : Union[str, Any] = scope
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Optional[int] = None
if self.use_input_mask:
lowercase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : str = None
if self.use_token_type_ids:
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Tuple = None
lowercase_ : int = None
lowercase_ : str = None
if self.use_labels:
lowercase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ) -> str:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self ) -> Dict:
(
lowercase_
) : List[Any] = self.prepare_config_and_inputs()
lowercase_ : Union[str, Any] = True
lowercase_ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
lowercase_ : List[Any] = NezhaModel(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : Union[str, Any] = model(A__ , attention_mask=A__ , token_type_ids=A__ )
lowercase_ : str = model(A__ , token_type_ids=A__ )
lowercase_ : Optional[int] = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[str]:
lowercase_ : List[str] = True
lowercase_ : List[Any] = NezhaModel(A__ )
model.to(A__ )
model.eval()
lowercase_ : Tuple = model(
A__ , attention_mask=A__ , token_type_ids=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , )
lowercase_ : Optional[Any] = model(
A__ , attention_mask=A__ , token_type_ids=A__ , encoder_hidden_states=A__ , )
lowercase_ : Dict = model(A__ , attention_mask=A__ , token_type_ids=A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
lowercase_ : Any = NezhaForMaskedLM(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : int = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
lowercase_ : Optional[Any] = NezhaForNextSentencePrediction(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : int = model(
A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
lowercase_ : str = NezhaForPreTraining(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : Union[str, Any] = model(
A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , next_sentence_label=A__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
lowercase_ : str = NezhaForQuestionAnswering(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : str = model(
A__ , attention_mask=A__ , token_type_ids=A__ , start_positions=A__ , end_positions=A__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : Optional[int] = self.num_labels
lowercase_ : Optional[int] = NezhaForSequenceClassification(A__ )
model.to(A__ )
model.eval()
lowercase_ : List[Any] = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : List[Any] = NezhaForTokenClassification(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : List[str] = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
lowercase_ : int = self.num_choices
lowercase_ : Optional[int] = NezhaForMultipleChoice(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : int = model(
A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Any = self.prepare_config_and_inputs()
(
lowercase_
) : List[str] = config_and_inputs
lowercase_ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
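# The inputs_dict built above mirrors the model's forward keyword arguments
# (input_ids, token_type_ids, attention_mask), so the shared tests can call
# model(**inputs_dict) uniformly for every Nezha model class.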
@require_torch
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : List[Any] = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : List[Any] = True
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> int:
lowercase_ : Tuple = super()._prepare_for_class(A__ , A__ , return_labels=A__ )
if return_labels:
if model_class in get_values(A__ ):
lowercase_ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A__ )
lowercase_ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A__ )
return inputs_dict
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Optional[int] = NezhaModelTester(self )
lowercase_ : List[Any] = ConfigTester(self , config_class=A__ , hidden_size=37 )
def lowerCamelCase__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A__ )
def lowerCamelCase__ ( self ) -> List[Any]:
# This regression test was failing with PyTorch < 1.3
(
lowercase_
) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase_ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__ )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__ )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*A__ )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A__ )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__ )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__ )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__ )
@slow
def lowerCamelCase__ ( self ) -> Tuple:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Union[str, Any] = NezhaModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
@slow
@require_torch_gpu
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowercase_ : Optional[Any] = True
lowercase_ : Dict = model_class(config=A__ )
lowercase_ : Optional[Any] = self._prepare_for_class(A__ , A__ )
lowercase_ : List[Any] = torch.jit.trace(
A__ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A__ , os.path.join(A__ , 'bert.pt' ) )
lowercase_ : Optional[Any] = torch.jit.load(os.path.join(A__ , 'bert.pt' ) , map_location=A__ )
loaded(inputs_dict['input_ids'].to(A__ ) , inputs_dict['attention_mask'].to(A__ ) )
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Optional[int] = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
lowercase_ : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase_ : Any = model(A__ , attention_mask=A__ )[0]
lowercase_ : str = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , A__ )
lowercase_ : Optional[Any] = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A__ , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self ) -> str:
lowercase_ : List[Any] = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
lowercase_ : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase_ : Tuple = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase_ : List[Any] = model(A__ , attention_mask=A__ )[0]
lowercase_ : Union[str, Any] = torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , A__ )
lowercase_ : Optional[int] = torch.tensor(
[[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A__ , atol=1E-4 ) )
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Any = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowercase=5_0257 , _lowercase=1024 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=None , _lowercase="gelu_pytorch_tanh" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=5_0256 , _lowercase=5_0256 , _lowercase=True , _lowercase=True , _lowercase=True , **_lowercase , ) -> Any:
lowercase_ : Tuple = vocab_size
lowercase_ : str = n_positions
lowercase_ : List[str] = n_embd
lowercase_ : str = n_layer
lowercase_ : Optional[Any] = n_head
lowercase_ : Optional[int] = n_inner
lowercase_ : Union[str, Any] = activation_function
lowercase_ : Dict = resid_pdrop
lowercase_ : str = embd_pdrop
lowercase_ : Optional[Any] = attn_pdrop
lowercase_ : List[Any] = layer_norm_epsilon
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[Any] = scale_attn_weights
lowercase_ : Any = use_cache
lowercase_ : List[str] = attention_softmax_in_fpaa
lowercase_ : Any = scale_attention_softmax_in_fpaa
lowercase_ : Optional[Any] = multi_query
lowercase_ : Optional[Any] = bos_token_id
lowercase_ : Optional[Any] = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
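    # Usage sketch (assumption: this corresponds to transformers'
    # GPTBigCodeConfig, per model_type "gpt_bigcode" above):
    #   config = GPTBigCodeConfig(n_layer=24, multi_query=True)
    #   config.hidden_size  # resolves to n_embd via attribute_map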
| 7 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _UpperCAmelCase ( ) -> int:
"""simple docstring"""
lowercase_ : Optional[Any] = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
lowercase_ : Union[str, Any] = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCAmelCase__ )
# Let's go
lowercase_ : Optional[Any] = parser.parse_args()
if not hasattr(lowerCAmelCase__ , 'func' ):
parser.print_help()
exit(1 )
# Run
lowercase_ : Any = args.func(lowerCAmelCase__ )
service.run()
if __name__ == "__main__":
main()
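    # Usage sketch: with only EnvironmentCommand registered above, the entry
    # point supports e.g. `diffusers-cli env` to print environment/debug info.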
| 720 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def test_all_is_compatible( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_compatible( self ) -> None:
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_not_compatible( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_compatible( self ) -> None:
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_not_compatible( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_all_is_compatible_variant( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant( self ) -> None:
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant_partial( self ) -> None:
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_not_compatible_variant( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant( self ) -> None:
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant_partial( self ) -> None:
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_not_compatible_variant( self ) -> None:
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
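# For reference, a simplified sketch of the rule the tests above encode (the
# real diffusers helper is more involved; this is an assumption-level
# illustration, not the library implementation): every folder that ships a
# `.bin` weight must also ship a `.safetensors` file for the repo to count
# as compatible, regardless of whether a variant infix is present.
def sketch_is_safetensors_compatible(filenames , variant=None ) -> bool:
    folders_with_bin = {f.rsplit('/' , 1 )[0] for f in filenames if f.endswith('.bin' )}
    folders_with_safe = {f.rsplit('/' , 1 )[0] for f in filenames if f.endswith('.safetensors' )}
    return folders_with_bin.issubset(folders_with_safe )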
| 7 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( TokenizerTesterMixin, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : Optional[int] = True
def lowerCamelCase__ ( self ) -> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase_ : List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowercase_ : str = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowercase_ : Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowercase_ : Optional[int] = {'unk_token': '<unk>'}
lowercase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase__ ) )
def lowerCamelCase__ ( self , **_lowercase ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase__ ( self , **_lowercase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase__ ( self , _lowercase ) -> int:
lowercase_ : List[str] = 'lower newer'
lowercase_ : str = 'lower newer'
return input_text, output_text
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase_ : List[Any] = 'lower newer'
lowercase_ : List[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowercase_ : Any = tokenizer.tokenize(UpperCamelCase__ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ : Optional[Any] = tokens + [tokenizer.unk_token]
lowercase_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[int] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=UpperCamelCase__ ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=UpperCamelCase__ ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Dict = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
lowercase_ : List[str] = tokenizer.encode('sequence builders' , add_special_tokens=UpperCamelCase__ )
lowercase_ : Tuple = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCamelCase__ )
lowercase_ : List[Any] = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
lowercase_ : List[str] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
lowercase_ : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
lowercase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Union[str, Any] = self.get_tokenizer()
lowercase_ : Tuple = 'Encode this sequence.'
lowercase_ : Tuple = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
lowercase_ : Union[str, Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
lowercase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ : List[str] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
lowercase_ : str = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
lowercase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing spaces after special tokens
lowercase_ : str = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )} ) # mask token has a left space
lowercase_ : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
lowercase_ : List[Any] = 'Encode <mask> sequence'
lowercase_ : Union[str, Any] = 'Encode <mask>sequence'
lowercase_ : Union[str, Any] = tokenizer.encode(UpperCamelCase__ )
lowercase_ : List[str] = encoded.index(UpperCamelCase__ )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ : List[Any] = tokenizer.encode(UpperCamelCase__ )
lowercase_ : Dict = encoded.index(UpperCamelCase__ )
lowercase_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase__ ( self ) -> List[str]:
pass
def lowerCamelCase__ ( self ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ : str = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ : str = 'A, <mask> AllenNLP sentence.'
lowercase_ : Optional[int] = tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
lowercase_ : Any = tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowercase_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowercase_ : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCamelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def lowerCamelCase__ ( self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
lowercase_ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , UpperCamelCase__ )
self.assertEqual(post_processor_state['add_prefix_space'] , UpperCamelCase__ )
self.assertEqual(post_processor_state['trim_offsets'] , UpperCamelCase__ )
def lowerCamelCase__ ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase_ : List[Any] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
lowercase_ : Dict = f"{text_of_1_token} {text_of_1_token}"
lowercase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
lowercase_ : List[str] = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
lowercase_ : Dict = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
lowercase_ : Any = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
lowercase_ : int = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
lowercase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
lowercase_ : Optional[Any] = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
lowercase_ : Any = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase_ : int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
lowercase_ : List[Any] = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ) + 1, 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
lowercase_ : Optional[int] = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
lowercase_ : Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
lowercase_ : Optional[Any] = tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
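# The offset assertions above reduce to simple arithmetic on the two-word
# string "hello hello"; a standalone illustration (plain ints, no tokenizer):
_tok = 'hello'
_text = f"{_tok} {_tok}"
# trim_offsets=True: the second word's span skips the leading space
assert (len(_tok ) + 1 , len(_text )) == (6 , 11 )
# trim_offsets=False: the leading space is part of the second span
assert (len(_tok ) , len(_text )) == (5 , 11 )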
| 721 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(a , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : List[Any] = f.readlines()
lowercase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
lowercase_ : Dict = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
if __name__ == "__main__":
A: str = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
A: List[str] = parser.parse_args()
update_custom_js(args.version)
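# For context, the script assumes custom.js contains a block shaped like:
#     const stableVersion = "v4.27.0"
#     const versionMapping = {
#         "v4.27.0": "v4.27.0",
#         ...
#     }
# so each release only bumps stableVersion and appends one mapping line.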
| 7 | 0 |
'''simple docstring'''
def solution ( ) -> int:
    """simple docstring"""
    days_per_month = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
    day = 6
    month = 1
    year = 1_9_0_1
    sundays = 0
    while year < 2_0_0_1:
        day += 7
        if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 2_9 and month == 2:
                month += 1
                day = day - 2_9
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 1_2:
            year += 1
            month = 1
        if year < 2_0_0_1 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
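if __name__ == "__main__":
    # Optional cross-check against the standard library over the same
    # 1901-2000 range (in `date.weekday()`, Monday is 0 and Sunday is 6):
    from datetime import date
    assert solution() == sum(
        date(year , month , 1 ).weekday() == 6
        for year in range(1_9_0_1 , 2_0_0_1 )
        for month in range(1 , 1_3 )
    )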
| 700 |
'''simple docstring'''
def get_data ( source_data : list[list[float]] ) -> list[list[float]]:
    """simple docstring"""
    data_lists : list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score ( data_lists : list[list[float]] , weights : list[int] ) -> list[list[float]]:
    """simple docstring"""
    score_lists : list[list[float]] = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score : list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores ( score_lists : list[list[float]] ) -> list[float]:
    """simple docstring"""
    final_scores : list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity ( source_data : list[list[float]] , weights : list[int] ) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
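if __name__ == "__main__":
    # Example run on illustrative vehicle data: weight 0 means "lower is
    # better" (e.g. price, mileage), weight 1 means "higher is better" (year).
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles , [0, 0, 1] ) )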
| 7 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A: str = logging.get_logger(__name__)
def get_yolos_config ( yolos_name : str ) -> YolosConfig:
"""simple docstring"""
lowercase_ : str = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase_ : str = 1_9_2
lowercase_ : Optional[Any] = 7_6_8
lowercase_ : Optional[Any] = 1_2
lowercase_ : Any = 3
lowercase_ : int = [8_0_0, 1_3_3_3]
lowercase_ : Tuple = False
elif yolos_name == "yolos_s_dWr":
lowercase_ : Dict = 3_3_0
lowercase_ : List[Any] = 1_4
lowercase_ : int = 6
lowercase_ : str = 1_3_2_0
elif "yolos_s" in yolos_name:
lowercase_ : Union[str, Any] = 3_8_4
lowercase_ : str = 1_5_3_6
lowercase_ : List[str] = 1_2
lowercase_ : Optional[Any] = 6
elif "yolos_b" in yolos_name:
lowercase_ : Union[str, Any] = [8_0_0, 1_3_4_4]
lowercase_ : List[Any] = 9_1
lowercase_ : Dict = "huggingface/label-files"
lowercase_ : str = "coco-detection-id2label.json"
lowercase_ : Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
lowercase_ : int = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowercase_ : Optional[Any] = idalabel
lowercase_ : Dict = {v: k for k, v in idalabel.items()}
return config
def read_in_q_k_v ( state_dict : dict , config : YolosConfig , base_model : bool = False ) -> None:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : Any = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
lowercase_ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Any = in_proj_weight[: config.hidden_size, :]
lowercase_ : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowercase_ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : int = in_proj_weight[-config.hidden_size :, :]
lowercase_ : List[Any] = in_proj_bias[-config.hidden_size :]
def rename_key ( name : str ) -> str:
"""simple docstring"""
if "backbone" in name:
lowercase_ : Optional[int] = name.replace('backbone' , 'vit' )
if "cls_token" in name:
lowercase_ : str = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
lowercase_ : str = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
lowercase_ : List[str] = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
lowercase_ : str = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowercase_ : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
lowercase_ : Dict = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowercase_ : Tuple = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase_ : str = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase_ : Any = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase_ : Optional[int] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase_ : str = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase_ : Dict = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
lowercase_ : int = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
lowercase_ : Any = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
lowercase_ : Any = name.replace('vit.norm' , 'vit.layernorm' )
return name
def convert_state_dict ( orig_state_dict : dict , model : YolosForObjectDetection ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase_ : List[Any] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
lowercase_ : str = key.split('.' )
lowercase_ : str = int(key_split[2] )
lowercase_ : Dict = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowercase_ : Dict = val[:dim, :]
lowercase_ : Optional[int] = val[
dim : dim * 2, :
]
lowercase_ : List[Any] = val[-dim:, :]
else:
lowercase_ : str = val[:dim]
lowercase_ : Optional[Any] = val[dim : dim * 2]
lowercase_ : int = val[-dim:]
else:
lowercase_ : Dict = val
return orig_state_dict
def prepare_img ( ) -> torch.Tensor:
"""simple docstring"""
lowercase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase_ : List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint ( yolos_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool = False ) -> None:
"""simple docstring"""
lowercase_ : str = get_yolos_config(SCREAMING_SNAKE_CASE_ )
# load original state_dict
lowercase_ : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )["model"]
# load 🤗 model
lowercase_ : List[Any] = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ : int = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by YolosImageProcessor
lowercase_ : int = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
lowercase_ : Dict = YolosImageProcessor(format='coco_detection' , size=SCREAMING_SNAKE_CASE_ )
lowercase_ : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
lowercase_ : List[str] = outputs.logits, outputs.pred_boxes
lowercase_ : Optional[int] = None, None
if yolos_name == "yolos_ti":
lowercase_ : Union[str, Any] = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowercase_ : Optional[Any] = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowercase_ : List[Any] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowercase_ : str = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowercase_ : List[Any] = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowercase_ : Optional[Any] = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowercase_ : List[Any] = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowercase_ : Tuple = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowercase_ : List[str] = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowercase_ : List[Any] = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(f"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
lowercase_ : List[str] = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print('Pushing to the hub...' )
lowercase_ : Union[str, Any] = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='hustvl' )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='hustvl' )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',"
" \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A: Optional[Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
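# Standalone shape sanity-check for the fused-qkv split performed in
# read_in_q_k_v above (toy hidden size, not a real checkpoint): a (3*H, H)
# matrix splits into equal (H, H) query / key / value blocks.
_H = 4
_qkv = torch.arange(3 * _H * _H , dtype=torch.float32 ).reshape(3 * _H , _H )
_q , _k , _v = _qkv[:_H] , _qkv[_H : 2 * _H] , _qkv[-_H:]
assert _q.shape == _k.shape == _v.shape == (_H , _H )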
| 701 |
'''simple docstring'''
def add ( first : int , second : int ) -> int:
    """simple docstring"""
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 0 |
'''simple docstring'''
import math
def res ( x : int , y : int ) -> float:
    """simple docstring"""
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
A: str = "Enter the base and the power separated by a comma: "
A , A: int = map(int, input(prompt).split(","))
A , A: Tuple = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
A: str = res(xa, ya)
A: Optional[Any] = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 702 |
'''simple docstring'''
class CircularQueue:
    """simple docstring"""
    def __init__( self , n ) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__( self ) -> int:
        return self.size
    def is_empty( self ) -> bool:
        return self.size == 0
    def first( self ):
        return False if self.is_empty() else self.array[self.front]
    def enqueue( self , data ):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue( self ):
        if self.size == 0:
            raise Exception('UNDERFLOW' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
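if __name__ == "__main__":
    # Minimal usage demonstration of the ring buffer above:
    cq = CircularQueue(3 )
    cq.enqueue(1 ).enqueue(2 )
    assert len(cq ) == 2 and cq.first() == 1
    assert cq.dequeue() == 1 and cq.dequeue() == 2
    assert cq.is_empty()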
| 7 | 0 |
'''simple docstring'''
from math import sqrt
def solution ( limit : int = 1_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    sum_shortest_sides : int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 703 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width ( height : int , width : int , scale_factor : int = 8 ) -> tuple[int, int]:
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image ( pil_image , w = 5_1_2 , h = 5_1_2 ) -> torch.Tensor:
    """simple docstring"""
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('RGB' ) )
    arr = arr.astype(np.float32 ) / 1_27.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class __magic_name__ ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , unet , scheduler , movq , ) -> None:
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
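# The classifier-free guidance update inside the denoising loop above, shown
# in isolation (the real call mixes unconditional and text-conditioned UNet
# noise predictions):
#     guided = uncond + guidance_scale * (text - uncond)
# e.g. with guidance_scale = 4.0, an all-zeros and an all-ones prediction
# blend into a tensor of 4.0s.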
| 7 | 0 |
'''simple docstring'''
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602_176_634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355_818,
}
def energy_conversion ( from_type : str , to_type : str , value : float ) -> float:
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION )}"
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
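if __name__ == "__main__":
    # Example conversions; expected values follow directly from the table above:
    assert energy_conversion('joule' , 'kilojoule' , 1_0_0_0 ) == 1.0
    assert energy_conversion('kilowatthour' , 'joule' , 1 ) == 3_6_0_0_0_0_0.0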
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
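# With the mapping above, `import transformers.onnx` stays cheap: the
# _LazyModule proxy resolves a submodule only when one of its attributes is
# first accessed, e.g.
#     from transformers.onnx import OnnxConfig  # triggers the real import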
| 7 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1_0_2_4,
'''moussaKam/barthez''': 1_0_2_4,
'''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
SPIECE_UNDERLINE = '''▁'''
class __magic_name__ ( PreTrainedTokenizer ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowercase , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase = None , **_lowercase , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ : List[str] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
lowercase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
lowercase_ : int = vocab_file
lowercase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
lowercase_ : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
lowercase_ : List[str] = len(self.sp_model ) - 1
lowercase_ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Any:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : str = [self.cls_token_id]
lowercase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> Dict:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[str]:
lowercase_ : Optional[Any] = [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__ ( self ) -> Union[str, Any]:
return len(self.sp_model )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Any = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self , _lowercase ) -> List[Any]:
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : str = self.sp_model.PieceToId(snake_case__ )
return spm_id if spm_id else self.unk_token_id
def lowerCamelCase__ ( self , _lowercase ) -> Union[str, Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(snake_case__ )
def lowerCamelCase__ ( self , _lowercase ) -> int:
lowercase_ : Dict = []
lowercase_ : str = ''
lowercase_ : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case__ ) + token
lowercase_ : List[str] = True
lowercase_ : List[str] = []
else:
current_sub_tokens.append(snake_case__ )
lowercase_ : Dict = False
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def __getstate__( self ) -> Any:
lowercase_ : Dict = self.__dict__.copy()
lowercase_ : Dict = None
return state
def __setstate__( self , _lowercase ) -> List[Any]:
lowercase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase_ : Optional[Any] = {}
lowercase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple:
if not os.path.isdir(snake_case__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase_ : Tuple = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , 'wb' ) as fi:
lowercase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
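    # Note on the save_vocabulary branch above: it follows the usual
    # copy-or-serialize pattern for SentencePiece tokenizers -- reuse the
    # on-disk .model file when one exists, otherwise dump the in-memory
    # serialized proto returned by `sp_model.serialized_model_proto()`.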
| 705 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def get_tokenizer( self , _lowercase ) -> FSMTTokenizer:
        return FSMTTokenizer.from_pretrained(_lowercase )
    def get_model( self , _lowercase ) -> FSMTForConditionalGeneration:
        model = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ) -> None:
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences , return_tensors='pt' , truncation=True , padding='longest' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['bleu'] , min_bleu_score )
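    # For context, `calculate_bleu` from the examples' utils is essentially a
    # thin wrapper over sacrebleu's corpus BLEU; a hedged sketch of its shape:
    #     def calculate_bleu(output_lns, refs_lns):
    #         return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}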
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A: Union[str, Any] = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __magic_name__ :
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase ) -> Optional[Any]:
raise NotImplementedError()
def lowerCamelCase__ ( self ) -> str:
raise NotImplementedError()
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = False , **_lowercase ) -> Dict:
lowercase_ : Tuple = tokenizer
lowercase_ : Union[str, Any] = skip_prompt
lowercase_ : int = decode_kwargs
# variables used in the streaming process
lowercase_ : Tuple = []
lowercase_ : Tuple = 0
lowercase_ : List[str] = True
def lowerCamelCase__ ( self , _lowercase ) -> List[str]:
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('TextStreamer only supports batch size 1' )
elif len(value.shape ) > 1:
lowercase_ : List[Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowercase_ : Any = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
lowercase_ : List[Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('\n' ):
lowercase_ : Any = text[self.print_len :]
lowercase_ : Optional[Any] = []
lowercase_ : Dict = 0
# If the last token is a CJK character, we print the characters.
elif len(_lowercase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowercase_ : List[Any] = text[self.print_len :]
self.print_len += len(_lowercase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowercase_ : Tuple = text[self.print_len : text.rfind(' ' ) + 1]
self.print_len += len(_lowercase )
self.on_finalized_text(_lowercase )
def lowerCamelCase__ ( self ) -> Any:
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
lowercase_ : List[str] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
lowercase_ : Optional[int] = text[self.print_len :]
lowercase_ : Optional[Any] = []
lowercase_ : int = 0
else:
lowercase_ : int = ''
lowercase_ : Any = True
self.on_finalized_text(_lowercase , stream_end=_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase = False ) -> Any:
print(_lowercase , flush=_lowercase , end='' if not stream_end else None )
def lowerCamelCase__ ( self , _lowercase ) -> Any:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = False , _lowercase = None , **_lowercase ) -> Optional[Any]:
super().__init__(_lowercase , _lowercase , **_lowercase )
lowercase_ : str = Queue()
lowercase_ : Tuple = None
lowercase_ : Any = timeout
def lowerCamelCase__ ( self , _lowercase , _lowercase = False ) -> Optional[int]:
self.text_queue.put(_lowercase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> Tuple:
return self
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Union[str, Any] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
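# --- Usage sketch (not part of the original snippet) ---
# A minimal, hedged example of driving the iterator-based streamer above,
# assuming the upstream transformers names (TextIteratorStreamer,
# AutoModelForCausalLM, AutoTokenizer) and the public 'gpt2' checkpoint.
# Generation runs in a background thread; the main thread consumes decoded
# text chunks from the queue until the stop signal arrives.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = AutoModelForCausalLM.from_pretrained('gpt2')
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
inputs = tokenizer(['An increasing sequence: one,'], return_tensors='pt')
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for new_text in streamer:  # blocks until the next chunk or the stop signal
    print(new_text, end='')
thread.join()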
| 707 |
'''simple docstring'''
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
lowercase_ : Dict = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
lowercase_ : Dict = ''
lowercase_ : Any = ''
    # append each character + "|" to new_input_string for range(0, length - 1)
for i in input_string[: len(a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously found furthest-ending
    # palindromic substring
lowercase_ , lowercase_ : Dict = 0, 0
# length[i] shows the length of palindromic substring with center i
lowercase_ : List[Any] = [1 for i in range(len(a ) )]
    # for each character in new_input_string find the corresponding palindromic string
lowercase_ : Dict = 0
for j in range(len(a ) ):
lowercase_ : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase_ : int = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
lowercase_ : Tuple = j - k + 1 # noqa: E741
lowercase_ : Tuple = j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase_ : Tuple = length[j]
lowercase_ : List[Any] = j
# create that string
lowercase_ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
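# --- Worked example (not part of the original snippet) ---
# Manacher's algorithm above runs in O(n): the "|" separators make every
# palindrome odd-length in the augmented string, and length[j] reuses the
# mirror palindrome at l + r - j to skip redundant comparisons. For example,
# for the input "abacab" the augmented string is "a|b|a|c|a|b" and the
# longest palindromic substring recovered is "bacab".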
| 7 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
A: Dict = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
A: int = 'UperNetConfig'
class __magic_name__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase = 0 , _lowercase = False , _lowercase = 1 , ) -> None:
super().__init__()
lowercase_ : Union[str, Any] = nn.Convad(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = nn.BatchNormad(_SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = nn.ReLU()
def snake_case__ ( self , _lowercase ) -> torch.Tensor:
lowercase_ : Optional[Any] = self.conv(_SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = self.batch_norm(_SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.activation(_SCREAMING_SNAKE_CASE )
return output
class __magic_name__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase ) -> None:
super().__init__()
lowercase_ : Any = [
nn.AdaptiveAvgPoolad(_SCREAMING_SNAKE_CASE ),
UperNetConvModule(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self , _lowercase ) -> torch.Tensor:
lowercase_ : Any = input
for layer in self.layers:
lowercase_ : int = layer(_SCREAMING_SNAKE_CASE )
return hidden_state
class __magic_name__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> None:
super().__init__()
lowercase_ : Dict = pool_scales
lowercase_ : Tuple = align_corners
lowercase_ : List[Any] = in_channels
lowercase_ : Tuple = channels
lowercase_ : Dict = []
for i, pool_scale in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase_ : Any = UperNetPyramidPoolingBlock(pool_scale=_SCREAMING_SNAKE_CASE , in_channels=_SCREAMING_SNAKE_CASE , channels=_SCREAMING_SNAKE_CASE )
self.blocks.append(_SCREAMING_SNAKE_CASE )
self.add_module(str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def snake_case__ ( self , _lowercase ) -> List[torch.Tensor]:
lowercase_ : Any = []
for ppm in self.blocks:
lowercase_ : Optional[Any] = ppm(_SCREAMING_SNAKE_CASE )
lowercase_ : int = nn.functional.interpolate(
_SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(_SCREAMING_SNAKE_CASE )
return ppm_outs
class __magic_name__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Dict:
super().__init__()
lowercase_ : List[Any] = config
lowercase_ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase_ : Dict = in_channels
lowercase_ : str = config.hidden_size
lowercase_ : int = False
lowercase_ : Optional[int] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowercase_ : Tuple = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowercase_ : List[Any] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowercase_ : List[Any] = nn.ModuleList()
lowercase_ : List[str] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase_ : Dict = UperNetConvModule(_SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
lowercase_ : str = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(_SCREAMING_SNAKE_CASE )
self.fpn_convs.append(_SCREAMING_SNAKE_CASE )
lowercase_ : str = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def snake_case__ ( self ) -> List[str]:
self.apply(self._init_weights )
def snake_case__ ( self , _lowercase ) -> Dict:
if isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case__ ( self , _lowercase ) -> List[str]:
lowercase_ : Optional[Any] = inputs[-1]
lowercase_ : List[str] = [x]
psp_outs.extend(self.psp_modules(_SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = torch.cat(_SCREAMING_SNAKE_CASE , dim=1 )
lowercase_ : Union[str, Any] = self.bottleneck(_SCREAMING_SNAKE_CASE )
return output
def snake_case__ ( self , _lowercase ) -> torch.Tensor:
# build laterals
lowercase_ : List[Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_SCREAMING_SNAKE_CASE ) )
# build top-down path
lowercase_ : Any = len(_SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase_ : int = laterals[i - 1].shape[2:]
lowercase_ : Dict = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=_SCREAMING_SNAKE_CASE , mode='bilinear' , align_corners=self.align_corners )
# build outputs
lowercase_ : Optional[int] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase_ : List[str] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
lowercase_ : Optional[int] = torch.cat(_SCREAMING_SNAKE_CASE , dim=1 )
lowercase_ : int = self.fpn_bottleneck(_SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = self.classifier(_SCREAMING_SNAKE_CASE )
return output
class __magic_name__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = 2 , _lowercase = 3 , _lowercase = 1 ) -> None:
super().__init__()
lowercase_ : str = config
lowercase_ : Optional[Any] = config.auxiliary_in_channels
lowercase_ : Tuple = config.auxiliary_channels
lowercase_ : List[str] = config.auxiliary_num_convs
lowercase_ : str = config.auxiliary_concat_input
lowercase_ : Any = in_index
lowercase_ : Any = (kernel_size // 2) * dilation
lowercase_ : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , dilation=_SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
lowercase_ : Optional[int] = nn.Identity()
else:
lowercase_ : List[Any] = nn.Sequential(*_SCREAMING_SNAKE_CASE )
if self.concat_input:
lowercase_ : List[str] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=_SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
lowercase_ : List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def snake_case__ ( self ) -> str:
self.apply(self._init_weights )
def snake_case__ ( self , _lowercase ) -> Optional[Any]:
if isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case__ ( self , _lowercase ) -> torch.Tensor:
# just take the relevant feature maps
lowercase_ : Optional[Any] = encoder_hidden_states[self.in_index]
lowercase_ : List[str] = self.convs(_SCREAMING_SNAKE_CASE )
if self.concat_input:
lowercase_ : Union[str, Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowercase_ : List[Any] = self.classifier(_SCREAMING_SNAKE_CASE )
return output
class __magic_name__ ( lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = UperNetConfig
SCREAMING_SNAKE_CASE_ : Tuple = "pixel_values"
SCREAMING_SNAKE_CASE_ : Tuple = True
def snake_case__ ( self , _lowercase ) -> Tuple:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def snake_case__ ( self ) -> Union[str, Any]:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def snake_case__ ( self , _lowercase , _lowercase=False ) -> Optional[int]:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase_ : int = value
A: List[str] = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
A: List[Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.', lowerCAmelCase__, )
class __magic_name__ ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Dict:
super().__init__(_SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowercase_ : Union[str, Any] = UperNetHead(_SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
lowercase_ : Dict = UperNetFCNHead(_SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def snake_case__ ( self , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , ) -> Union[tuple, SemanticSegmenterOutput]:
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : str = output_attentions if output_attentions is not None else self.config.output_attentions
lowercase_ : str = self.backbone.forward_with_filtered_kwargs(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
lowercase_ : Dict = outputs.feature_maps
lowercase_ : Tuple = self.decode_head(_SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = nn.functional.interpolate(_SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=_SCREAMING_SNAKE_CASE )
lowercase_ : Dict = None
if self.auxiliary_head is not None:
lowercase_ : Dict = self.auxiliary_head(_SCREAMING_SNAKE_CASE )
lowercase_ : Dict = nn.functional.interpolate(
_SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=_SCREAMING_SNAKE_CASE )
lowercase_ : Any = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
lowercase_ : Optional[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowercase_ : Optional[int] = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowercase_ : Union[str, Any] = (logits,) + outputs[1:]
else:
lowercase_ : Dict = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
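# --- Inference sketch (not part of the original snippet) ---
# A hedged end-to-end example of the model above, assuming the upstream
# transformers API (AutoImageProcessor, UperNetForSemanticSegmentation) and
# the public checkpoint listed at the top of this snippet.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny')
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits  # (batch, num_labels, height, width)
segmentation = logits.argmax(dim=1)  # per-pixel class ids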
| 708 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
def lowerCamelCase__ ( self ) -> Optional[Any]:
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple:
self._get_dirs()
lowercase_ : str = self._strip_protocol(_lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
self._get_dirs()
lowercase_ : List[str] = PurePosixPath(path.strip('/' ) )
lowercase_ : List[str] = {}
for p, f in self.dir_cache.items():
lowercase_ : Tuple = PurePosixPath(p.strip('/' ) )
lowercase_ : Optional[int] = p.parent
if root == path:
lowercase_ : List[str] = f
lowercase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
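# --- Usage sketch (not part of the original snippet) ---
# A hedged example of the legacy read-only filesystem above. The class name
# is obfuscated here, so `LegacyDatasetFS` below is a hypothetical stand-in
# for it; the repo info comes from huggingface_hub.
#
#   from huggingface_hub import HfApi
#   repo_info = HfApi().dataset_info('squad')
#   fs = LegacyDatasetFS(repo_info=repo_info)
#   fs.ls('')                        # list top-level files of the dataset repo
#   with fs.open('README.md') as f:  # streams the file from the Hub
#       print(f.read()[:100])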
| 7 | 0 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __magic_name__ ( _A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE_ : Any = 'AutoImageProcessor'
SCREAMING_SNAKE_CASE_ : Optional[int] = 'AutoTokenizer'
def __init__( self , _lowercase=None , _lowercase=None , **_lowercase ) -> Union[str, Any]:
lowercase_ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase__ , )
lowercase_ : Dict = kwargs.pop('feature_extractor' )
lowercase_ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ : Any = self.image_processor
lowercase_ : Optional[int] = False
def __call__( self , *_lowercase , **_lowercase ) -> Any:
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ : Tuple = kwargs.pop('images' , UpperCamelCase__ )
lowercase_ : str = kwargs.pop('text' , UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
lowercase_ : Dict = args[0]
lowercase_ : int = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowercase_ : Any = self.image_processor(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
if text is not None:
lowercase_ : Optional[int] = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase_ : Optional[Any] = encodings['input_ids']
return inputs
def lowerCamelCase__ ( self , *_lowercase , **_lowercase ) -> List[Any]:
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase__ ( self , *_lowercase , **_lowercase ) -> Any:
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@contextmanager
def lowerCamelCase__ ( self ) -> Dict:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
lowercase_ : Dict = True
lowercase_ : List[str] = self.tokenizer
yield
lowercase_ : Any = self.image_processor
lowercase_ : List[str] = False
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , _lowercase=None ) -> Optional[Any]:
if added_vocab is None:
lowercase_ : Any = self.tokenizer.get_added_vocab()
lowercase_ : Dict = {}
while tokens:
lowercase_ : Dict = re.search(r'<s_(.*?)>' , UpperCamelCase__ , re.IGNORECASE )
if start_token is None:
break
lowercase_ : str = start_token.group(1 )
lowercase_ : List[Any] = re.search(rf"</s_{key}>" , UpperCamelCase__ , re.IGNORECASE )
lowercase_ : Any = start_token.group()
if end_token is None:
lowercase_ : Tuple = tokens.replace(UpperCamelCase__ , '' )
else:
lowercase_ : str = end_token.group()
lowercase_ : Optional[Any] = re.escape(UpperCamelCase__ )
lowercase_ : Dict = re.escape(UpperCamelCase__ )
lowercase_ : Any = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , UpperCamelCase__ , re.IGNORECASE )
if content is not None:
lowercase_ : int = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowercase_ : Union[str, Any] = self.tokenajson(UpperCamelCase__ , is_inner_value=UpperCamelCase__ , added_vocab=UpperCamelCase__ )
if value:
if len(UpperCamelCase__ ) == 1:
lowercase_ : int = value[0]
lowercase_ : str = value
else: # leaf nodes
lowercase_ : Any = []
for leaf in content.split(r'<sep/>' ):
lowercase_ : Optional[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowercase_ : Optional[int] = leaf[1:-2] # for categorical special tokens
output[key].append(UpperCamelCase__ )
if len(output[key] ) == 1:
lowercase_ : Union[str, Any] = output[key][0]
lowercase_ : str = tokens[tokens.find(UpperCamelCase__ ) + len(UpperCamelCase__ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCamelCase__ , added_vocab=UpperCamelCase__ )
if len(UpperCamelCase__ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowerCamelCase__ ( self ) -> Tuple:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase__ , )
return self.image_processor_class
@property
def lowerCamelCase__ ( self ) -> Tuple:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase__ , )
return self.image_processor
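# --- Usage sketch (not part of the original snippet) ---
# The tag-parsing method above mirrors the upstream DonutProcessor.token2json:
# it turns a generated tag sequence into nested JSON. A hedged illustration
# with a toy tag vocabulary (the tag names are invented for the example):
#
#   sequence = '<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>'
#   # token2json(sequence) -> {'menu': {'name': 'latte', 'price': '4.50'}}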
| 709 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : Optional[int] = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = in_proj_bias[: config.hidden_size]
lowercase_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(a , a )
def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = dct.pop(a )
lowercase_ : Dict = val
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
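# --- Example invocation (not part of the original snippet) ---
# A sketch of running this conversion script from the command line; the
# script filename is hypothetical, the URL is the default declared above:
#
#   python convert_vilt_original_to_pytorch_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm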
| 7 | 0 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _UpperCAmelCase ( a : Dict = 8 ) -> str:
"""simple docstring"""
lowercase_ : Union[str, Any] = ascii_letters + digits + punctuation
return "".join(secrets.choice(a ) for _ in range(a ) )
def _UpperCAmelCase ( a : List[Any] , a : Dict ) -> str:
"""simple docstring"""
    # Alternative password generator: mixes the required characters (chars_incl)
    # with random letters, digits, and punctuation via the helper functions below
i -= len(a )
lowercase_ : Union[str, Any] = i // 3
lowercase_ : Optional[int] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowercase_ : Tuple = (
chars_incl
+ random(a , quotient + remainder )
+ random(a , a )
+ random(a , a )
)
lowercase_ : Dict = list(a )
shuffle(a )
return "".join(a )
# random is a generalised function for letters, characters and numbers
def _UpperCAmelCase ( a : Dict , a : Dict ) -> str:
"""simple docstring"""
return "".join(secrets.choice(a ) for _ in range(a ) )
def _UpperCAmelCase ( a : Tuple , a : Tuple ) -> Tuple:
"""simple docstring"""
pass # Put your code here...
def _UpperCAmelCase ( a : List[Any] , a : List[str] ) -> Tuple:
"""simple docstring"""
pass # Put your code here...
def _UpperCAmelCase ( a : Union[str, Any] , a : Optional[int] ) -> List[str]:
"""simple docstring"""
pass # Put your code here...
def _UpperCAmelCase ( a : Dict , a : List[str] = 8 ) -> bool:
"""simple docstring"""
if len(a ) < min_length:
# Your Password must be at least 8 characters long
return False
lowercase_ : Optional[Any] = any(char in ascii_uppercase for char in password )
lowercase_ : Optional[Any] = any(char in ascii_lowercase for char in password )
lowercase_ : List[Any] = any(char in digits for char in password )
lowercase_ : List[Any] = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain uppercase and lowercase letters,
# numbers, and special characters
def _UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
lowercase_ : Optional[Any] = int(input('Please indicate the max length of your password: ' ).strip() )
lowercase_ : str = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' , password_generator(a ) )
print(
'Alternative Password generated:' , alternative_password_generator(a , a ) , )
    print('[If you are thinking of using this password, you had better save it.]' )
if __name__ == "__main__":
main()
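# --- Standalone sketch (not part of the original snippet) ---
# The strength check above is obfuscated and its helpers shadow one another,
# so here is a renamed, runnable sketch of the same rule set (the character
# sets come from the `string` imports at the top of this snippet):
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        return False
    return (
        any(char in ascii_uppercase for char in password)
        and any(char in ascii_lowercase for char in password)
        and any(char in digits for char in password)
        and any(char in punctuation for char in password)
    )


assert is_strong_password('Hwea7$2!') is True
assert is_strong_password('sheep') is False  # too short, no digit or special char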
| 710 |
'''simple docstring'''
def _UpperCAmelCase ( a : list ) -> list:
"""simple docstring"""
for i in range(len(a ) - 1 , 0 , -1 ):
lowercase_ : Any = False
        for j in range(i , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase_ , lowercase_ : Any = unsorted[j - 1], unsorted[j]
lowercase_ : int = True
        for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
lowercase_ , lowercase_ : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowercase_ : Optional[Any] = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
A: Tuple = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
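# --- Worked example (not part of the original snippet) ---
# The bidirectional passes above bubble the maximum to the right and the
# minimum to the left on each iteration, stopping early once a full pass
# makes no swap, e.g. (calling the function defined above):
#   _UpperCAmelCase([4, 5, 2, 1, 2])      -> [1, 2, 2, 4, 5]
#   _UpperCAmelCase([-4, 5, 0, 1, 2, 11]) -> [-4, 0, 1, 2, 5, 11]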
| 7 | 0 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
A: Dict = logging.getLogger()
def _UpperCAmelCase ( a : str , a : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase_ : Tuple = '\n'.join(__snake_case )
Path(__snake_case ).open('w' ).writelines(__snake_case )
A: Any = "patrickvonplaten/t5-tiny-random"
A: Tuple = "sshleifer/bart-tiny-random"
A: Dict = "sshleifer/tiny-mbart"
A: str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase ) -> Union[str, Any]:
lowercase_ : Optional[int] = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
lowercase_ : Union[str, Any] = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
lowercase_ : Dict = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(__a , __a )
lowercase_ : Union[str, Any] = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
lowercase_ : str = 'translation_en_to_de' if model == T5_TINY else 'summarization'
lowercase_ : Any = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()
with patch.object(__a , 'argv' , __a ):
run_generate()
assert Path(__a ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase__ ( self ) -> List[str]:
self.run_eval_tester(__a )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
self.run_eval_tester(__a )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase__ ( self , _lowercase ) -> List[str]:
lowercase_ : Dict = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
lowercase_ : Optional[int] = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
lowercase_ : int = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
lowercase_ : Any = Path(self.get_auto_remove_tmp_dir() )
lowercase_ : List[str] = str(tmp_dir / 'scores.json' )
lowercase_ : List[Any] = str(tmp_dir / 'val.target' )
_dump_articles(__a , text['en'] )
_dump_articles(__a , text['de'] )
lowercase_ : Any = 'translation_en_to_de' if model == T5_TINY else 'summarization'
lowercase_ : Tuple = f"\n run_eval_search.py\n {model}\n {str(__a )}\n {str(__a )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(__a , 'argv' , __a ):
with CaptureStdout() as cs:
run_search()
lowercase_ : Union[str, Any] = [' num_beams | length_penalty', model, 'Best score args']
lowercase_ : Any = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(__a )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(__a ).exists()
os.remove(Path(__a ) )
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ['transformers', 'torch', 'note_seq']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 7 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A: List[Any] = ["small", "medium", "large"]
A: int = "lm_head.decoder.weight"
A: Optional[Any] = "lm_head.weight"
def _UpperCAmelCase ( a : Union[str, Any] , a : int ) -> Tuple:
"""simple docstring"""
lowercase_ : List[str] = torch.load(lowerCamelCase_ )
lowercase_ : Tuple = d.pop(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
if __name__ == "__main__":
A: str = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
A: Optional[Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A: List[Any] = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A: List[str] = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
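# --- Example invocation (not part of the original snippet) ---
# A sketch of the intended conversion (the script filename is hypothetical).
# Upstream, the popped `lm_head.decoder.weight` tensor is re-stored under
# `lm_head.weight` before saving, turning each fine-tuned DialoGPT pickle
# into an HF-style weights file:
#
#   python convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py --dialogpt_path ./checkpoints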
| 712 |
'''simple docstring'''
def _UpperCAmelCase ( a : str , a : str ) -> float:
"""simple docstring"""
def get_matched_characters(a : str , a : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
lowercase_ : Optional[int] = int(max(0 , i - limit ) )
lowercase_ : Union[str, Any] = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(a )
lowercase_ : Union[str, Any] = f"{_stra[0:_stra.index(a )]} {_stra[_stra.index(a ) + 1:]}"
return "".join(a )
# matching characters
lowercase_ : Union[str, Any] = get_matched_characters(a , a )
lowercase_ : Optional[Any] = get_matched_characters(a , a )
lowercase_ : Optional[int] = len(a )
# transposition
lowercase_ : Dict = (
len([(ca, ca) for ca, ca in zip(a , a ) if ca != ca] ) // 2
)
if not match_count:
lowercase_ : List[str] = 0.0
else:
lowercase_ : Any = (
1
/ 3
* (
match_count / len(a )
+ match_count / len(a )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowercase_ : Optional[Any] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
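# --- Worked example (not part of the original snippet) ---
# For "hello" vs "world" the matching window is min(5, 5) // 2 = 2, so only
# "l" matches in both directions; with no transpositions and no common
# prefix, jaro = (1/5 + 1/5 + 1/1) / 3 and the reference Jaro-Winkler
# similarity is 0.4666666666666666.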
| 7 | 0 |
def _UpperCAmelCase ( a : str ) -> Optional[int]:
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
lowercase_ : int = sorted(string.lower() )
return len(A__ ) == len(set(A__ ) )
if __name__ == "__main__":
A: Dict = input("Enter a string ").strip()
A: Tuple = is_isogram(input_str)
print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
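# --- Worked example (not part of the original snippet) ---
# The check lowercases the string and compares its length with the size of
# its character set, e.g.:
#   "Uncopyrightable" -> True   (15 distinct letters)
#   "allowance"       -> False  ("a" and "l" repeat)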
| 713 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( a : int = 4 ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : Tuple = abs(a ) or 4
return [[1 + x + y * row_size for x in range(a )] for y in range(a )]
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(transpose(a ) )
# OR.. transpose(reverse_column(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(reverse_column(a ) )
# OR.. reverse_column(reverse_row(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_column(transpose(a ) )
# OR.. transpose(reverse_row(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : Any = [list(a ) for x in zip(*a )]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : List[str] = matrix[::-1]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : str = [x[::-1] for x in matrix]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> None:
"""simple docstring"""
for i in matrix:
print(*a )
if __name__ == "__main__":
A: Dict = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_aa(matrix))
A: List[Any] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_aaa(matrix))
A: List[str] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_aaa(matrix))
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : int = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
lowercase_ : List[str] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !"
lowercase_ : str = model(_lowercase )["last_hidden_state"]
lowercase_ : List[Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _lowercase )
# compare the actual values for a slice.
lowercase_ : Optional[Any] = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 714 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _UpperCAmelCase ( a : Dict , a : Optional[int] , a : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Any = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
lowercase_ : List[str] = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
lowercase_ : Optional[Any] = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(a , exist_ok=a )
lowercase_ : int = os.path.join(a , 'README.md' )
print(f"Generating {path}" )
with open(a , 'w' , encoding='utf-8' ) as f:
f.write(a )
# make sure we are under the root of the project
A: List[str] = Path(__file__).resolve().parent.parent.parent
A: List[str] = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A , A , A: Any = model_name.split("-")
A: int = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase_ , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowercase_ , 'depth_multiplier' ) )
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=3 , _lowercase=32 , _lowercase=0.25 , _lowercase=8 , _lowercase=8 , _lowercase=6 , _lowercase=32 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase="relu6" , _lowercase=1280 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=10 , _lowercase=None , ) -> int:
lowercase_ : Optional[Any] = parent
lowercase_ : Tuple = batch_size
lowercase_ : List[str] = num_channels
lowercase_ : Tuple = image_size
lowercase_ : List[Any] = depth_multiplier
lowercase_ : Optional[int] = depth_divisible_by
lowercase_ : str = min_depth
lowercase_ : Optional[int] = expand_ratio
lowercase_ : Optional[int] = tf_padding
lowercase_ : Tuple = output_stride
lowercase_ : Any = first_layer_is_expansion
lowercase_ : List[Any] = finegrained_output
lowercase_ : Any = hidden_act
lowercase_ : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
lowercase_ : Optional[int] = classifier_dropout_prob
lowercase_ : Dict = use_labels
lowercase_ : Tuple = is_training
lowercase_ : Any = num_labels
lowercase_ : List[str] = initializer_range
lowercase_ : int = scope
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : str = None
lowercase_ : Optional[int] = None
if self.use_labels:
lowercase_ : Any = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase_ : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase__ ( self ) -> int:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
lowercase_ : Tuple = MobileNetVaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : int = MobileNetVaForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : str = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : List[str] = self.num_labels
lowercase_ : Optional[int] = MobileNetVaForSemanticSegmentation(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Optional[int] = model(lowercase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowercase_ : Tuple = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Any = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = config_and_inputs
lowercase_ : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : List[Any] = (
{
'feature-extraction': MobileNetVaModel,
'image-classification': MobileNetVaForImageClassification,
'image-segmentation': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Any = False
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : List[Any] = MobileNetVaModelTester(self )
lowercase_ : Optional[int] = MobileNetVaConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def lowerCamelCase__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def lowerCamelCase__ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def lowerCamelCase__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def lowerCamelCase__ ( self ) -> List[str]:
pass
def lowerCamelCase__ ( self ) -> int:
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
lowercase_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Dict = [*signature.parameters.keys()]
lowercase_ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def lowerCamelCase__ ( self ) -> List[Any]:
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : int = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.hidden_states
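# MobileNetV2 is expected to expose 16 hidden states here (presumably one feature map per backbone stage)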
lowercase_ : int = 16
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
lowercase_ : List[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )
@slow
def lowerCamelCase__ ( self ) -> Dict:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = MobileNetVaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
lowercase_ : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self ) -> Optional[int]:
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : List[str] = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(lowercase_ )
lowercase_ : Dict = self.default_image_processor
lowercase_ : Optional[int] = prepare_img()
lowercase_ : Any = image_processor(images=lowercase_ , return_tensors='pt' ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Any = model(**lowercase_ )
# verify the logits
lowercase_ : str = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : str = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : int = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
lowercase_ : Any = model.to(lowercase_ )
lowercase_ : int = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
lowercase_ : str = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors='pt' ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[int] = model(**lowercase_ )
lowercase_ : Optional[Any] = outputs.logits
# verify the logits
lowercase_ : List[Any] = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , lowercase_ )
lowercase_ : Optional[int] = torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
[[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
] , device=lowercase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 ) )
| 715 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
def _UpperCAmelCase ( a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[str] = git.Repo(search_parent_directories=a )
lowercase_ : Union[str, Any] = {
'repo_id': str(a ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(a , 'git_log.json' ) , 'w' ) as f:
json.dump(a , a , indent=4 )
def _UpperCAmelCase ( a : str ) -> Union[str, Any]:
"""simple docstring"""
if params.n_gpu <= 0:
lowercase_ : int = 0
lowercase_ : Union[str, Any] = -1
lowercase_ : List[str] = True
lowercase_ : Optional[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
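# these variables are expected to be exported by the distributed launcher (e.g. torchrun) and the job script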
lowercase_ : Dict = int(os.environ['WORLD_SIZE'] )
lowercase_ : Union[str, Any] = int(os.environ['N_GPU_NODE'] )
lowercase_ : Optional[int] = int(os.environ['RANK'] )
# number of nodes / node ID
lowercase_ : int = params.world_size // params.n_gpu_per_node
lowercase_ : str = params.global_rank // params.n_gpu_per_node
lowercase_ : Dict = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase_ : str = 1
lowercase_ : Dict = 0
lowercase_ : Tuple = 0
lowercase_ : List[Any] = 0
lowercase_ : int = 1
lowercase_ : Tuple = 1
lowercase_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase_ : List[str] = params.node_id == 0 and params.local_rank == 0
lowercase_ : Optional[Any] = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _UpperCAmelCase ( a : Dict ) -> Optional[int]:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 7 | 0 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
return False
return isinstance(snake_case__ , torch._dynamo.eval_frame.OptimizedModule )
def _UpperCAmelCase ( a : Union[str, Any] , a : bool = True ) -> List[str]:
"""simple docstring"""
lowercase_ : Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowercase_ : int = is_compiled_module(snake_case__ )
if is_compiled:
lowercase_ : str = model
lowercase_ : Optional[int] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(snake_case__ , snake_case__ ):
lowercase_ : Optional[Any] = model.module
if not keep_fpaa_wrapper:
lowercase_ : int = getattr(snake_case__ , 'forward' )
lowercase_ : int = model.__dict__.pop('_original_forward' , snake_case__ )
if original_forward is not None:
while hasattr(snake_case__ , '__wrapped__' ):
lowercase_ : List[str] = forward.__wrapped__
if forward == original_forward:
break
lowercase_ : Optional[int] = forward
if getattr(snake_case__ , '_converted_to_transformer_engine' , snake_case__ ):
convert_model(snake_case__ , to_transformer_engine=snake_case__ )
if is_compiled:
lowercase_ : Dict = model
lowercase_ : Optional[int] = compiled_model
return model
def _UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
PartialState().wait_for_everyone()
def _UpperCAmelCase ( a : Optional[int] , a : Optional[int] ) -> Any:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(snake_case__ , snake_case__ )
elif PartialState().local_process_index == 0:
torch.save(snake_case__ , snake_case__ )
@contextmanager
def _UpperCAmelCase ( **a : Tuple ) -> str:
"""simple docstring"""
for key, value in kwargs.items():
lowercase_ : List[Any] = str(snake_case__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _UpperCAmelCase ( a : Any ) -> List[str]:
"""simple docstring"""
if not hasattr(snake_case__ , '__qualname__' ) and not hasattr(snake_case__ , '__name__' ):
lowercase_ : Any = getattr(snake_case__ , '__class__' , snake_case__ )
if hasattr(snake_case__ , '__qualname__' ):
return obj.__qualname__
if hasattr(snake_case__ , '__name__' ):
return obj.__name__
return str(snake_case__ )
def _UpperCAmelCase ( a : List[str] , a : Optional[int] ) -> List[Any]:
"""simple docstring"""
for key, value in source.items():
if isinstance(snake_case__ , snake_case__ ):
lowercase_ : str = destination.setdefault(snake_case__ , {} )
merge_dicts(snake_case__ , snake_case__ )
else:
lowercase_ : int = value
return destination
def _UpperCAmelCase ( a : int = None ) -> bool:
"""simple docstring"""
if port is None:
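# fall back to torch.distributed's conventional default master port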
lowercase_ : Optional[Any] = 2_9_5_0_0
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 716 |
'''simple docstring'''
import os
from distutils.util import strtobool
def _UpperCAmelCase ( a : Any , a : int ) -> Any:
"""simple docstring"""
for e in env_keys:
lowercase_ : Optional[Any] = int(os.environ.get(a , -1 ) )
if val >= 0:
return val
return default
def _UpperCAmelCase ( a : List[Any] , a : Dict=False ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Optional[int] = os.environ.get(a , str(a ) )
return strtobool(a ) == 1 # As its name indicates `strtobool` actually returns an int...
def _UpperCAmelCase ( a : List[Any] , a : Dict="no" ) -> str:
"""simple docstring"""
lowercase_ : List[Any] = os.environ.get(a , str(a ) )
return value
| 7 | 0 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
A: Optional[Any] = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( a : Union[str, Any] , a : int ) -> List[Any]:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase_ : Dict = XLMProphetNetForConditionalGenerationOld.from_pretrained(a )
lowercase_ : List[str] = XLMProphetNetForConditionalGeneration.from_pretrained(
a , output_loading_info=a )
else:
lowercase_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(a )
lowercase_ : List[str] = ProphetNetForConditionalGeneration.from_pretrained(
a , output_loading_info=a )
lowercase_ : Tuple = ['''key_proj''', '''value_proj''', '''query_proj''']
lowercase_ : List[Any] = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
lowercase_ : Optional[int] = key.split('.' )
if attributes[0] == "lm_head":
lowercase_ : Optional[Any] = prophet
lowercase_ : int = prophet_old
else:
lowercase_ : List[str] = prophet.prophetnet
lowercase_ : str = prophet_old.model
lowercase_ : Any = False
for attribute in attributes:
if attribute in mapping:
lowercase_ : Optional[Any] = mapping[attribute]
if not hasattr(a , a ) and len(a ) > 0:
lowercase_ : Union[str, Any] = attribute
elif hasattr(a , a ):
lowercase_ : Optional[int] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase_ : str = old_model.weight
logger.info(f"{attribute} is initialized." )
lowercase_ : List[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase_ : Union[str, Any] = old_model.bias
logger.info(f"{attribute} is initialized." )
lowercase_ : Dict = True
break
elif attribute in special_keys and hasattr(a , 'in_proj_weight' ):
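# the old attention stores query/key/value as a single fused in_proj tensor; each projection takes one third of its rows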
lowercase_ : List[Any] = old_model.in_proj_weight.shape[0] // 3
lowercase_ : Optional[int] = getattr(a , a )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase_ : str = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase_ : Any = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase_ : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase_ : List[str] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase_ : Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase_ : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase_ : List[Any] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
lowercase_ : Dict = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
lowercase_ : Union[str, Any] = True
break
if attribute.isdigit():
lowercase_ : Dict = model[int(a )]
lowercase_ : Optional[int] = old_model[int(a )]
else:
lowercase_ : Any = getattr(a , a )
if old_attribute == "":
lowercase_ : Tuple = old_model
else:
if not hasattr(a , a ):
raise ValueError(f"{old_model} does not have {old_attribute}" )
lowercase_ : str = getattr(a , a )
if not is_key_init:
raise ValueError(f"{key} was not correctly initialized!" )
print(f"Saving model to {pytorch_dump_folder_path}" )
prophet.save_pretrained(a )
if __name__ == "__main__":
A: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A: Optional[Any] = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 717 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : Dict , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> str:
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
lowercase_ : Union[str, Any] = new_id
# turn into Numpy arrays
lowercase_ : List[Any] = np.array(a )
lowercase_ : Optional[Any] = np.array(a )
if reduce_labels:
lowercase_ : Any = 2_5_5
lowercase_ : Dict = label - 1
lowercase_ : List[Any] = 2_5_5
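# drop pixels whose ground-truth label equals ignore_index before building the histograms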
lowercase_ : Any = label != ignore_index
lowercase_ : List[Any] = np.not_equal(a , a )
lowercase_ : Optional[int] = pred_label[mask]
lowercase_ : Union[str, Any] = np.array(a )[mask]
lowercase_ : Optional[int] = pred_label[pred_label == label]
lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Dict = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
lowercase_ : Optional[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def _UpperCAmelCase ( a : int , a : Optional[Any] , a : Optional[int] , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Dict:
"""simple docstring"""
lowercase_ : Dict = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
lowercase_ : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(a , a ):
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = intersect_and_union(
a , a , a , a , a , a )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _UpperCAmelCase ( a : Optional[Any] , a : List[str] , a : Optional[Any] , a : bool , a : Optional[int] = None , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Optional[int]:
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = total_intersect_and_union(
a , a , a , a , a , a )
# compute metrics
lowercase_ : str = {}
lowercase_ : str = total_area_intersect.sum() / total_area_label.sum()
lowercase_ : Optional[Any] = total_area_intersect / total_area_union
lowercase_ : List[Any] = total_area_intersect / total_area_label
lowercase_ : Any = np.nanmean(a )
lowercase_ : Optional[Any] = np.nanmean(a )
lowercase_ : int = all_acc
lowercase_ : Union[str, Any] = iou
lowercase_ : Optional[Any] = acc
if nan_to_num is not None:
lowercase_ : Optional[int] = {metric: np.nan_to_num(a , nan=a ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , ) -> Tuple:
lowercase_ : Optional[int] = mean_iou(
results=_lowercase , gt_seg_maps=_lowercase , num_labels=_lowercase , ignore_index=_lowercase , nan_to_num=_lowercase , label_map=_lowercase , reduce_labels=_lowercase , )
return iou_result
| 7 | 0 |
'''simple docstring'''
import os
def _UpperCAmelCase ( a : Optional[int] = "input.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(a ) , a ) ) as input_file:
lowercase_ : Optional[Any] = [
[int(a ) for element in line.split(',' )]
for line in input_file.readlines()
]
lowercase_ : Optional[Any] = len(a )
lowercase_ : Dict = len(matrix[0] )
lowercase_ : Any = [[-1 for _ in range(a )] for _ in range(a )]
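# dynamic programming over columns: seed with the first column, relax each cell from its left neighbour, then sweep the column downwards and upwards to account for vertical moves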
for i in range(a ):
lowercase_ : Any = matrix[i][0]
for j in range(1 , a ):
for i in range(a ):
lowercase_ : List[str] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , a ):
lowercase_ : str = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase_ : int = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit'
def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=16 , **_lowercase , ) -> List[str]:
super().__init__(**_lowercase )
lowercase_ : Optional[int] = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Any = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Any = image_size
lowercase_ : Tuple = patch_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : str = qkv_bias
lowercase_ : List[str] = encoder_stride
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' )
@property
def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__ ( self ) -> float:
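# absolute tolerance used when validating the ONNX export against the PyTorch reference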
return 1E-4
| 7 | 0 |
def _UpperCAmelCase ( a : int = 1_0_0_0 ) -> List[Any]:
"""simple docstring"""
lowercase_ : Union[str, Any] = 2**power
lowercase_ : List[str] = str(__a )
lowercase_ : Any = list(__a )
lowercase_ : Optional[Any] = 0
for i in list_num:
sum_of_num += int(__a )
return sum_of_num
if __name__ == "__main__":
A: Tuple = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
A: Any = solution(power)
print("Sum of the digits is: ", result)
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
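# maps canonical config attribute names onto the GPT-2 style names used by this architecture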
SCREAMING_SNAKE_CASE_ : Any = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowercase=5_0257 , _lowercase=1024 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=None , _lowercase="gelu_pytorch_tanh" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=5_0256 , _lowercase=5_0256 , _lowercase=True , _lowercase=True , _lowercase=True , **_lowercase , ) -> Any:
lowercase_ : Tuple = vocab_size
lowercase_ : str = n_positions
lowercase_ : List[str] = n_embd
lowercase_ : str = n_layer
lowercase_ : Optional[Any] = n_head
lowercase_ : Optional[int] = n_inner
lowercase_ : Union[str, Any] = activation_function
lowercase_ : Dict = resid_pdrop
lowercase_ : str = embd_pdrop
lowercase_ : Optional[Any] = attn_pdrop
lowercase_ : List[Any] = layer_norm_epsilon
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[Any] = scale_attn_weights
lowercase_ : Any = use_cache
lowercase_ : List[str] = attention_softmax_in_fpaa
lowercase_ : Any = scale_attention_softmax_in_fpaa
lowercase_ : Optional[Any] = multi_query
lowercase_ : Optional[Any] = bos_token_id
lowercase_ : Optional[Any] = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
| 7 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __magic_name__ ( _snake_case, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ShapEPipeline
SCREAMING_SNAKE_CASE_ : int = ["""prompt"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""prompt"""]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
@property
def lowerCamelCase__ ( self ) -> str:
return 32
@property
def lowerCamelCase__ ( self ) -> List[Any]:
return 32
@property
def lowerCamelCase__ ( self ) -> List[Any]:
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ) -> Dict:
return 8
@property
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowerCamelCase__ ( self ) -> str:
torch.manual_seed(0 )
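# deliberately tiny CLIP text encoder config so the pipeline test stays fast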
lowercase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(snake_case_ )
@property
def lowerCamelCase__ ( self ) -> str:
torch.manual_seed(0 )
lowercase_ : Tuple = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
lowercase_ : Dict = PriorTransformer(**snake_case_ )
return model
@property
def lowerCamelCase__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase_ : Dict = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
lowercase_ : Union[str, Any] = ShapERenderer(**snake_case_ )
return model
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : int = self.dummy_prior
lowercase_ : str = self.dummy_text_encoder
lowercase_ : Optional[int] = self.dummy_tokenizer
lowercase_ : Tuple = self.dummy_renderer
lowercase_ : Dict = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=snake_case_ , clip_sample=snake_case_ , clip_sample_range=1.0 , )
lowercase_ : List[str] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def lowerCamelCase__ ( self , _lowercase , _lowercase=0 ) -> Dict:
if str(snake_case_ ).startswith('mps' ):
lowercase_ : Any = torch.manual_seed(snake_case_ )
else:
lowercase_ : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
lowercase_ : Any = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Any = "cpu"
lowercase_ : int = self.get_dummy_components()
lowercase_ : Tuple = self.pipeline_class(**snake_case_ )
lowercase_ : Any = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
lowercase_ : Optional[int] = pipe(**self.get_dummy_inputs(snake_case_ ) )
lowercase_ : List[str] = output.images[0]
lowercase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase_ : List[str] = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ) -> List[Any]:
# NOTE: Larger batch sizes cause this test to time out; only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : List[Any] = torch_device == "cpu"
lowercase_ : Tuple = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=snake_case_ , relax_max_difference=snake_case_ , )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = self.get_dummy_components()
lowercase_ : str = self.pipeline_class(**snake_case_ )
lowercase_ : Optional[Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
lowercase_ : int = 1
lowercase_ : Dict = 2
lowercase_ : Union[str, Any] = self.get_dummy_inputs(snake_case_ )
for key in inputs.keys():
if key in self.batch_params:
lowercase_ : Tuple = batch_size * [inputs[key]]
lowercase_ : Optional[Any] = pipe(**snake_case_ , num_images_per_prompt=snake_case_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
lowercase_ : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
lowercase_ : List[Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
lowercase_ : str = torch.Generator(device=snake_case_ ).manual_seed(0 )
lowercase_ : Dict = pipe(
'a shark' , generator=snake_case_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
| 720 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
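# every pytorch .bin weight below has a .safetensors counterpart, so the repo is safetensors-compatible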
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def _UpperCAmelCase ( a : Callable[[int | float], int | float] , a : int | float , a : int | float , a : int = 1_0_0 , ) -> float:
"""simple docstring"""
lowercase_ : List[Any] = x_start
lowercase_ : str = fnc(a )
lowercase_ : Optional[Any] = 0.0
for _ in range(a ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
lowercase_ : Tuple = (x_end - x_start) / steps + xa
lowercase_ : Tuple = fnc(a )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
lowercase_ : List[str] = xa
lowercase_ : Union[str, Any] = fxa
return area
if __name__ == "__main__":
def _UpperCAmelCase ( a : Any ) -> int:
"""simple docstring"""
return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
A: int = 1_0
while i <= 1_0_0_0_0_0:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 1_0
| 721 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(a , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : List[Any] = f.readlines()
lowercase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
lowercase_ : Dict = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
if __name__ == "__main__":
A: str = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
A: List[str] = parser.parse_args()
update_custom_js(args.version)
| 7 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
A: str = None
A: Optional[Any] = logging.get_logger(__name__)
A: Optional[int] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
A: int = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
A: Dict = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
A: List[str] = "▁"
class __magic_name__ ( __UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ : int = BarthezTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , **_lowercase , ) -> Dict:
# The mask token behaves like a normal word, i.e. it includes the space before it
lowercase_ : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
super().__init__(
_lowerCamelCase , tokenizer_file=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , )
lowercase_ : Union[str, Any] = vocab_file
lowercase_ : List[str] = False if not self.vocab_file else True
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Optional[int]:
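# RoBERTa-style special tokens: <s> A </s> for a single sequence, <s> A </s></s> B </s> for a pair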
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : Optional[Any] = [self.cls_token_id]
lowercase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Union[str, Any]:
lowercase_ : Optional[int] = [self.sep_token_id]
lowercase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Optional[int]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase_ : str = os.path.join(
_lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.vocab_file , _lowerCamelCase )
return (out_vocab_file,)
| 700 |
'''simple docstring'''
def _UpperCAmelCase ( a : list[list[float]] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(a ):
if len(a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(a ) )
return data_lists
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for dlist, weight in zip(a , a ):
lowercase_ : Tuple = min(a )
lowercase_ : Any = max(a )
lowercase_ : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowercase_ : str = f"Invalid weight of {weight:f} provided"
raise ValueError(a )
score_lists.append(a )
return score_lists
def _UpperCAmelCase ( a : list[list[float]] ) -> list[float]:
"""simple docstring"""
lowercase_ : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(a ):
lowercase_ : List[Any] = final_scores[j] + ele
return final_scores
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : int = get_data(a )
lowercase_ : Optional[int] = calculate_each_score(a , a )
lowercase_ : Dict = generate_final_scores(a )
# append scores to source data
for i, ele in enumerate(a ):
source_data[i].append(a )
return source_data
| 7 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=3 , _lowercase=True , _lowercase=True , _lowercase=0.1 , _lowercase=0.1 , _lowercase=224 , _lowercase=1000 , _lowercase=[3, 3, 6, 4] , _lowercase=[48, 56, 112, 220] , ) -> str:
lowercase_ : Tuple = parent
lowercase_ : List[Any] = batch_size
lowercase_ : List[str] = num_channels
lowercase_ : int = is_training
lowercase_ : str = use_labels
lowercase_ : Tuple = hidden_dropout_prob
lowercase_ : Union[str, Any] = attention_probs_dropout_prob
lowercase_ : Optional[Any] = num_labels
lowercase_ : str = image_size
lowercase_ : Dict = layer_depths
lowercase_ : Optional[int] = embed_dims
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : str = None
if self.use_labels:
lowercase_ : str = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self ) -> Tuple:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Any:
lowercase_ : List[Any] = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase_ : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : List[str] = self.num_labels
lowercase_ : Optional[Any] = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase_ : Dict = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase_ : Dict = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : Tuple = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = self.prepare_config_and_inputs()
lowercase_ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__, lowerCamelCase__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Any = False
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : int = SwiftFormerModelTester(self )
lowercase_ : List[str] = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def lowerCamelCase__ ( self ) -> List[Any]:
pass
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Tuple = model_class(__lowerCamelCase )
lowercase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[int] = model_class(__lowerCamelCase )
lowercase_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Tuple = [*signature.parameters.keys()]
lowercase_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def lowerCamelCase__ ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : str = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase__ ( self ) -> Tuple:
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
lowercase_ : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : Dict = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( self ) -> List[str]:
def _config_zero_init(_lowercase ):
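# shrink every initializer range to ~0 so correctly initialized parameters collapse to 0 or 1 and stragglers stand out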
lowercase_ : List[Any] = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1E-1_0 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
lowercase_ : List[str] = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Union[str, Any] = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
lowercase_ : Tuple = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__ ( self ) -> Tuple:
pass
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs') if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703, 2.1107, -2.0811]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 701 |
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations (half-adder loop)."""
    while second != 0:
        carry = first & second  # common set bits would generate a carry
        first ^= second         # sum of the bits without the carry
        second = carry << 1     # shift the carry into its place
    return first
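# Worked example (verifiable by running add): add(5, 3)
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#                             -> carry=0b010, first=0b100, second=0b100
#                             -> carry=0b100, first=0b000, second=0b1000
#                             -> carry=0,     first=0b1000  => returns 8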
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 0 |
'''simple docstring'''
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A: str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _UpperCAmelCase ( ) -> Any:
"""simple docstring"""
lowercase_ : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowercase_ : Any = get_sagemaker_input()
else:
lowercase_ : List[Any] = get_cluster_input()
return config
def _UpperCAmelCase ( a : int=None ) -> Optional[Any]:
"""simple docstring"""
if subparsers is not None:
lowercase_ : Tuple = subparsers.add_parser('config' , description=UpperCamelCase__ )
else:
lowercase_ : Dict = argparse.ArgumentParser('Accelerate config command' , description=UpperCamelCase__ )
parser.add_argument(
'--config_file' , default=UpperCamelCase__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def _UpperCAmelCase ( a : List[Any] ) -> str:
"""simple docstring"""
lowercase_ : Union[str, Any] = get_user_input()
if args.config_file is not None:
lowercase_ : Any = args.config_file
else:
if not os.path.isdir(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
lowercase_ : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(UpperCamelCase__ )
else:
config.to_yaml_file(UpperCamelCase__ )
print(f"accelerate configuration saved at {config_file}" )
def _UpperCAmelCase ( ) -> int:
"""simple docstring"""
lowercase_ : Any = config_command_parser()
lowercase_ : str = parser.parse_args()
config_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
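# Typical flow (a sketch of how the pieces above fit together): running `accelerate config`
# walks the interactive prompts, builds a config object, and writes it to --config_file or to
# the default YAML path under the HF cache; `.json` targets are serialized as JSON instead.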
| 702 |
'''simple docstring'''
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
lowercase_ : Dict = n
lowercase_ : Dict = [None] * self.n
lowercase_ : Tuple = 0 # index of the first element
lowercase_ : List[Any] = 0
lowercase_ : List[Any] = 0
def __len__( self ) -> int:
return self.size
def lowerCamelCase__ ( self ) -> bool:
return self.size == 0
def lowerCamelCase__ ( self ) -> List[Any]:
return False if self.is_empty() else self.array[self.front]
def lowerCamelCase__ ( self , _lowercase ) -> Any:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
lowercase_ : Tuple = data
lowercase_ : List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def lowerCamelCase__ ( self ) -> Any:
if self.size == 0:
raise Exception('UNDERFLOW' )
lowercase_ : Dict = self.array[self.front]
lowercase_ : Tuple = None
lowercase_ : int = (self.front + 1) % self.n
self.size -= 1
return temp
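# Minimal usage sketch (assumes the class above):
#   q = CircularQueue(3)
#   q.enqueue('a').enqueue('b')   # enqueue returns self, so calls chain
#   q.first()                     # -> 'a' (peek; the queue is unchanged)
#   q.dequeue()                   # -> 'a'; front advances modulo n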
| 7 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A: int = logging.get_logger(__name__)
def get_config(model_name):
    """simple docstring"""
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id, )
    return config
def rename_key(name):
    """simple docstring"""
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "head.fc" in name:
        name = name.replace('head.fc', 'classifier.1')
    if name.startswith('norm'):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
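# A few concrete renames, derived directly from the rules above:
#   rename_key("stem.conv.weight") -> "bit.embedder.convolution.weight"
#   rename_key("head.fc.weight")   -> "classifier.1.weight"
#   rename_key("norm.weight")      -> "bit.norm.weight"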
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print('Logits:', logits[0, :3])
    print('Predicted class:', model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 703 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
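# e.g. height=width=768 with scale_factor=8 -> 768 // 64 = 12 latent cells per side,
# reported back as 12 * 8 = 96 (the latent resolution the unet actually sees).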
def prepare_image(pil_image, w=512, h=512):
    """simple docstring"""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
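# The result is a 1xCxHxW float tensor rescaled from [0, 255] into [-1, 1],
# the value range the MoVQ encoder below expects.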
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
"""simple docstring"""
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ) -> None:
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
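    # e.g. num_inference_steps=100, strength=0.2 -> init_timestep=20, t_start=80:
    # only the last 20 scheduler steps run, so the source image is only lightly noised.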
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
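    # Note: when cpu offload is active the pipeline object itself reports 'cpu', so the
    # accelerate hook's execution_device is treated as the authoritative placement here.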
@torch.no_grad()
@replace_example_docstring(_lowercase )
    def __call__(self, image_embeds, image, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, strength=0.3, num_images_per_prompt=1, generator=None, output_type="pil", return_dict=True, ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)['latents']
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 7 | 0 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 1_2,
"430M": 2_4,
"1B5": 2_4,
"3B": 3_2,
"7B": 3_2,
"14B": 4_0,
}
HIDEN_SIZE_MAPPING = {
"169M": 7_6_8,
"430M": 1_0_2_4,
"1B5": 2_0_4_8,
"3B": 2_5_6_0,
"7B": 4_0_9_6,
"14B": 5_1_2_0,
}
def convert_state_dict(state_dict):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.', 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0', 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(R'blocks\.(\d+)\.att', R'blocks.\1.attention', name)
        # ffn -> feed_forward
        name = re.sub(R'blocks\.(\d+)\.ffn', R'blocks.\1.feed_forward', name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k', '.time_mix_key')
        # time_mix_v -> time_mix_value and reshape
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v', '.time_mix_value')
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r', '.time_mix_receptance')
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
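# e.g. convert_state_dict({'emb.weight': w}) renames the key to 'rwkv.embeddings.weight';
# 'head.weight' is the one entry left without the 'rwkv.' prefix.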
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
    """simple docstring"""
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='cpu')
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.')
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)
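# Note on step 5 (an assumption worth stating): shard_checkpoint splits the state dict into
# <=10GB pieces by default, and re-saving each shard with cpu().clone() materializes the
# tensors so the files shrink to their true size instead of carrying shared storage.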
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 705 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
"""simple docstring"""
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
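    # The per-pair thresholds above are deliberately loose lower bounds: only the small
    # fsmt_val_data batch is scored, so this guards against regressions rather than
    # certifying headline BLEU numbers.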
| 7 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A: Any = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4')
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
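# The decoder mask built above always marks position 0 (the decoder start token) as
# attendable, then masks only the padded positions in the shifted targets.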
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/pegasus-large', from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum')
        tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum')
        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text, return_tensors='np', truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
'''simple docstring'''
class EditDistance:
    """simple docstring"""

    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
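# Both solvers run in O(len(word1) * len(word2)) time and space;
# e.g. min_dist_bottom_up("intention", "execution") == 5.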
if __name__ == "__main__":
    solver = EditDistance()
    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()
    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()
    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 707 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this substring
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
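# e.g. palindromic_string("abbbaba") -> "abbba"; the "|" separators let even- and
# odd-length palindromes be handled uniformly by the same center expansion.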
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    """simple docstring"""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    """simple docstring"""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    """simple docstring"""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    """simple docstring"""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
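# Each helper above is wrapped in @get_duration (imported from utils), so a call like
# read(dataset, length=SMALL_TEST) is timed and presumably returns the elapsed wall-clock
# duration rather than the rows themselves.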
def benchmark_iterating():
    """simple docstring"""
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0_0}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0_0_0}),
]
    functions_shuffled = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0_0}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0_0_0}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset')
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32')), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'list': (100,)}, )
        print('first set of iterations')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print('shuffling dataset')
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling)')
        for func, kwargs in functions_shuffled:
            print('shuffled ', func.__name__, str(kwargs))
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 708 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """simple docstring"""

    root_marker = ''
    protocol = 'hf-legacy'  # "hf://"" is reserved for hffs
    def __init__(self, repo_info=None, token=None, **kwargs, ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    'name': hf_file.rfilename,
                    'size': None,
                    'type': 'file',
                }
                self.dir_cache.update(
                    {
                        str(d): {'name': str(d), 'size': None, 'type': 'directory'}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip('/'))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('/'))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f['name'] for f in out)
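    # e.g. with a sibling 'data/train.csv' in the repo, ls('data') -> ['data/train.csv'],
    # and info('data/train.csv') reports {'type': 'file', ...} from the cached listing.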
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
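
# Worked example (illustrative, not in the original script): with hidden_size = 768
# the fused qkv weight has shape (2304, 768) and the slices above carve it up as
#   rows    0: 768   -> query
#   rows  768:1536   -> key
#   rows 1536:2304   -> value (written as in_proj_weight[-768:, :] above)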
def remove_classification_head_(state_dict):
    """Drop the original classification head, which has no HuggingFace counterpart."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under `old` to `new` in-place."""
    val = dct.pop(old)
    dct[new] = val
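
# Quick sanity check (illustrative) for the helper above:
#   d = {"old": 1}
#   rename_key(d, "old", "new")
#   assert d == {"new": 1}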
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original ViLT checkpoint weights into the HuggingFace ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)
    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
# Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 7 | 0 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """
    Build a ksize x ksize Gabor kernel with orientation `theta` (in degrees),
    gaussian standard deviation `sigma`, wavelength `lambd`, spatial aspect
    ratio `gamma` and phase offset `psi`.
    """
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
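
# Illustrative check (not part of the original script): an even ksize is bumped to
# the next odd value, so a requested 10x10 kernel actually comes back as 11x11:
#   kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
#   assert kernel.shape == (11, 11)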
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 710 |
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Pure implementation of the cocktail shaker sort algorithm: sorts the input
    list in place in ascending order and returns it.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
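
# Example behaviour (illustrative): each pass bubbles the largest remaining value
# right and the smallest left, so
#   cocktail_shaker_sort([4, 5, 2, 1, 2])  ->  [1, 2, 2, 4, 5]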
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class ASTConfig(PretrainedConfig):
    """
    Configuration class for the Audio Spectrogram Transformer (AST) model.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
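
# Minimal usage sketch (illustrative): the defaults reproduce the base AST
# configuration, and individual fields can be overridden by keyword:
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   assert config.hidden_size == 768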
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the class and method names were stripped from this snippet; the names below
# follow the standard diffusers dummy-object layout and are assumptions.
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
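
# Behaviour sketch (illustrative): instantiating the dummy without the required
# backends installed raises an ImportError via `requires_backends`, naming
# "transformers", "torch" and "note_seq" as the missing dependencies.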
| 7 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Create train/eval `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
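
# Note (illustrative): "longest" padding keeps batches small on GPU, while TPUs
# prefer the fixed shapes produced by "max_length" padding. A quick way to inspect
# the result (assumes an Accelerator can be constructed in this environment):
#   train_dl, eval_dl = get_dataloaders(Accelerator(), batch_size=16)
#   batch = next(iter(train_dl))
#   print(batch["input_ids"].shape)  # (16, longest-sequence-in-batch)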
def training_function(config, args):
    """Train and evaluate on GLUE MRPC, tracking the best accuracy across epochs."""
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script tracking model performance.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
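
# Typical invocation (illustrative), e.g. with a DeepSpeed config created via
# `accelerate config`:
#   accelerate launch this_script.py --model_name_or_path bert-base-cased \
#       --num_epochs 3 --output_dir ./results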
| 712 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings: the Jaro score boosted by a
    bonus for a common prefix of up to four characters.
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
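
# Spot checks (illustrative): identical strings score 1.0, and a pair with no
# matching characters scores 0.0:
#   assert jaro_winkler("hello", "hello") == 1.0
#   assert jaro_winkler("abc", "xyz") == 0.0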
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 7 | 0 |