| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """List (old, new) key pairs mapping timm parameter names to HF DeiT names."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split the fused timm qkv projection into separate HF query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
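# The slicing in read_in_q_k_v relies on timm storing the attention input projection as one
# fused (3*hidden, hidden) matrix laid out as [query; key; value]. A minimal self-contained
# sketch with toy sizes (illustrative only, not part of the original script):
def _demo_qkv_split(hidden: int = 4) -> None:
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    # first `hidden` rows -> query, next `hidden` rows -> key, last `hidden` rows -> value
    q, k, v = qkv[:hidden, :], qkv[hidden : hidden * 2, :], qkv[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv)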
def rename_key(dct, old, new):
    """Pop `old` from the dict and re-insert its value under `new`."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """We will verify our results on an image of cute cats."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm checkpoint's weights into our DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path) | 552 |
"""simple docstring"""
from collections import Counter
from timeit import timeit
def UpperCAmelCase ( _lowercase : str = "" , ) -> bool:
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def UpperCAmelCase ( _lowercase : str = "" ) -> bool:
"""simple docstring"""
if len(_lowercase ) == 0:
return True
lowerCAmelCase_ = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowerCAmelCase_ = {}
for character in lower_case_input_str:
lowerCAmelCase_ = character_freq_dict.get(_lowercase , 0 ) + 1
lowerCAmelCase_ = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
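# Quick sanity checks (illustrative, not part of the original module): a rearrangement into a
# palindrome exists iff at most one character occurs an odd number of times; both functions
# above ignore spaces and case.
def _demo_palindrome_checks() -> None:
    assert can_string_be_rearranged_as_palindrome_counter("Momo")  # "moom" / "ommo"
    assert not can_string_be_rearranged_as_palindrome("Mother")  # six distinct letters, six odd counts
    assert can_string_be_rearranged_as_palindrome("A man a plan a canal Panama")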
def UpperCAmelCase ( _lowercase : str = "" ) -> None:
"""simple docstring"""
print('''\nFor string = ''' , _lowercase , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(_lowercase ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(_lowercase ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
lowercase_ = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
lowercase_ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""") | 552 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    """Non-recursive segment tree; works with any commutative combine function."""

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set the element at index p (0-based) to v and rebuild its ancestors."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine the inclusive range [l, r] of 0-based indices."""
        l, r = l + self.N, r + self.N  # noqa: E741
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2  # noqa: E741
        return res
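# Layout sketch (illustrative, not part of the original module): the tree is an implicit binary
# heap with leaves stored at st[N:] and parent p combining children 2p and 2p+1, so
# SegmentTree([5, 8, 6, 3], min) builds st == [None, 3, 5, 3, 5, 8, 6, 3] and st[1] holds the
# minimum of the whole array.
def _demo_layout() -> None:
    tree = SegmentTree([5, 8, 6, 3], min)
    assert tree.st == [None, 3, 5, 3, 5, 8, 6, 3]
    assert tree.query(0, 3) == 3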
if __name__ == "__main__":
from functools import reduce
UpperCAmelCase_ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
UpperCAmelCase_ = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
UpperCAmelCase_ = SegmentTree(test_array, min)
UpperCAmelCase_ = SegmentTree(test_array, max)
UpperCAmelCase_ = SegmentTree(test_array, lambda a, b: a + b)
def __magic_name__ ( ) -> Optional[Any]:
"""simple docstring"""
for i in range(len(snake_case__ ) ):
for j in range(snake_case__ , len(snake_case__ ) ):
lowercase_ : List[Any] = reduce(snake_case__ , test_array[i : j + 1] )
lowercase_ : Dict = reduce(snake_case__ , test_array[i : j + 1] )
lowercase_ : Optional[Any] = reduce(lambda lowercase , lowercase : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(snake_case__ , snake_case__ )
assert max_range == max_segment_tree.query(snake_case__ , snake_case__ )
assert sum_range == sum_segment_tree.query(snake_case__ , snake_case__ )
test_all_segments()
for index, value in test_updates.items():
UpperCAmelCase_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments() | 706 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set each object to None and empty the accelerator cache so its memory can be reused."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if the exception looks like an out-of-memory error worth retrying."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that retries `function` with a halved batch size whenever it hits an OOM error."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
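# Minimal usage sketch (the `run`/`data` names are illustrative): the decorated function takes
# the batch size as its FIRST argument and callers omit it; on an OOM-style RuntimeError the
# wrapper halves the batch size and retries.
def _demo_find_executable_batch_size() -> None:
    @find_executable_batch_size(starting_batch_size=8)
    def run(batch_size, data):
        if batch_size > 2:
            raise RuntimeError("CUDA out of memory.")  # recognized by should_reduce_batch_size
        return batch_size

    assert run("data") == 2  # retried: 8 -> 4 -> 2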
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
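# How `_LazyModule` defers the heavy torch import until first attribute access: a minimal
# sketch of the idea (illustrative only; the real transformers implementation is more involved).
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is only hit once per attribute
        return value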
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
    import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
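# Class-based scraping like the above is brittle, since Yahoo changes its markup regularly.
# A slightly more defensive variant, sketched under the assumption that the same markup still
# exists (the `stock_price_safe` name is illustrative):
def stock_price_safe(symbol: str = "AAPL") -> str | None:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    response = requests.get(url, timeout=10)
    if not response.ok:
        return None
    soup = BeautifulSoup(response.text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div else None
    return span.text if span else None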
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features; property names mirror the corresponding model input names."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        """PyTorch dataset wrapping cached HANS features."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        """TensorFlow dataset built from HANS features via a generator."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Convert a list of InputExamples into InputFeatures usable by the model."""
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"Loading tokenizer classes: {tokenizer_names}")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
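# One-off alternative for a single checkpoint (sketch; `BertTokenizerFast` and the paths are
# illustrative): loading a slow checkpoint through a *Fast class triggers the same conversion,
# and `legacy_format=False` makes `save_pretrained` write the unified tokenizer.json file.
def _demo_single_conversion(dump_path="./bert-fast"):
    from transformers import BertTokenizerFast

    fast_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    fast_tokenizer.save_pretrained(dump_path, legacy_format=False)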
if __name__ == "__main__":
lowerCamelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCamelCase_ : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 246 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
lowerCamelCase_ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers package."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
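# Example of the marker these regexes parse (the module path and class names are illustrative):
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# _re_copy_warning captures ("", "models.attention.BasicTransformerBlock",
# "with BasicTransformerBlock->MyBlock"); _re_replace_pattern then extracts the
# ("BasicTransformerBlock", "MyBlock") substitution that is applied to the theoretical code below.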
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check whether code commented as a copy in `filename` matches the original; return the diffs
    or overwrite the file depending on `overwrite`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(REPO_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowercase = parser.parse_args()
check_copies(args.fix_and_overwrite)
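
# Typical invocations (matching the error message above):
#   python utils/check_copies.py                      # report stale copies and raise
#   python utils/check_copies.py --fix_and_overwrite  # rewrite stale copies in place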
| 659 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump one batch in both token-id and decoded-text form."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
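
    # Label smoothing, in short: instead of putting all probability mass on the gold
    # token, a fraction epsilon is spread uniformly over the vocabulary, so the loss
    # is roughly (1 - epsilon) * NLL(gold) + epsilon * mean(-log p) over all tokens,
    # with padding positions excluded via `ignore_index`. That is what the imported
    # `label_smoothed_nll_loss` helper computes.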
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None,
            )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length", default=1024, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length", default=56, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False, help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)

    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
snake_case__ : List[Any] = argparse.ArgumentParser()
snake_case__ : Any = pl.Trainer.add_argparse_args(parser)
snake_case__ : Any = SummarizationModule.add_model_specific_args(parser, os.getcwd())
snake_case__ : str = parser.parse_args()
main(args)
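
# A hypothetical launch command (flags come from `add_model_specific_args` above plus
# the generic/Lightning arguments added in `lightning_base`; paths are placeholders):
#   python finetune.py --data_dir ./cnn_dm --output_dir ./ckpts --gpus 1 \
#       --do_train --do_predict --n_val 500 --val_metric rouge2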
| 706 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
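
# Example invocation (hypothetical file names; python-fire maps the function's
# arguments directly onto CLI flags):
#   python rouge_cli.py predictions.txt targets.txt --save_path rouge.json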
| 171 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A_ : List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ : Tuple = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , )
A_ : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
A_ : Optional[int] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = self.convolution(self.padding(lowercase ) )
A_ : Optional[Any] = self.normalization(lowercase )
A_ : Union[str, Any] = self.activation(lowercase )
return hidden_state
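
# Note on the pad-then-"VALID" pattern above: for stride > 1, Keras' padding="SAME"
# can pad asymmetrically, while the reference PyTorch convolution pads symmetrically
# by kernel_size // 2. An explicit ZeroPadding2D followed by a "VALID" convolution
# therefore reproduces the PyTorch output shapes, e.g. a 3x3/stride-2 convolution
# maps a (1, 32, 32, 16) NHWC input to (1, 16, 16, filters).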
class TFRegNetEmbeddings(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Dict = config.num_channels
A_ : Dict = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ : Optional[Any] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
A_ : Dict = self.embedder(lowercase )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' )
A_ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class TFRegNetSELayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
A_ : Any = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.pooler(lowercase )
for layer_module in self.attention:
A_ : List[str] = layer_module(lowercase )
A_ : str = hidden_state * pooled
return hidden_state
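
# The layer above is a squeeze-and-excitation block: global average pooling squeezes
# each channel to a scalar, the ReLU 1x1 convolution bottlenecks to `reduced_channels`,
# the sigmoid 1x1 convolution produces per-channel gates in (0, 1), and the input is
# rescaled channel-wise, roughly out = x * sigmoid(W2 relu(W1 mean_hw(x))).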
class TFRegNetXLayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Union[str, Any] = in_channels != out_channels or stride != 1
A_ : Optional[Any] = max(1 , out_channels // config.groups_width )
A_ : List[Any] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ : Optional[Any] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ),
]
A_ : int = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = hidden_state
for layer_module in self.layers:
A_ : Union[str, Any] = layer_module(lowercase )
A_ : int = self.shortcut(lowercase )
hidden_state += residual
A_ : Optional[int] = self.activation(lowercase )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Any = in_channels != out_channels or stride != 1
A_ : Union[str, Any] = max(1 , out_channels // config.groups_width )
A_ : str = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
A_ : Optional[int] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
]
A_ : str = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Any = hidden_state
for layer_module in self.layers:
A_ : Optional[Any] = layer_module(lowercase )
A_ : Optional[Any] = self.shortcut(lowercase )
hidden_state += residual
A_ : Tuple = self.activation(lowercase )
return hidden_state
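
# The two residual blocks mirror the RegNetX / RegNetY split in the paper: the X layer
# defined first is a plain grouped-convolution bottleneck, while the Y layer above
# additionally inserts the squeeze-and-excitation gate (`TFRegNetSELayer`) before the
# final 1x1 projection.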
class TFRegNetStage(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[Any] = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
A_ : int = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
*[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for layer_module in self.layers:
A_ : List[str] = layer_module(lowercase )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
A_ : int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ):
"""simple docstring"""
A_ : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Optional[Any] = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(lowercase )
if output_hidden_states:
A_ : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
'''simple docstring'''
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
'''simple docstring'''
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def call(
        self,
        pixel_values=None,
        labels=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
 | 558 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
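
# Quick sanity check (illustrative values): for out = [[0.1, 0.9], [0.8, 0.2]] and
# labels = [1, 0], both argmaxes match and accuracy(out, labels) returns the raw
# count 2. Note it returns a count, not a ratio; the eval loop below divides by the
# number of examples itself.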
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label).

    Produces Transformer inputs of shape (n_batch, 2, input_len) with both candidate continuations per story.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
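
# Resulting layout, for a hypothetical batch of n stories: input_ids has shape
# (n, 2, input_len), where row [i, 0] holds <start> story <delim> cont1 <clf> and
# row [i, 1] the cont2 variant, so the double-heads model scores both candidates in
# a single forward pass. mc_token_ids points at each <clf> position, lm_labels
# mirrors the inputs with -100 everywhere else (ignored by the LM loss), and
# mc_labels stores the index of the correct continuation.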
def main():
    parser = argparse.ArgumentParser()
parser.add_argument('--model_name' ,type=__lowercase ,default='openai-gpt' ,help='pretrained model name' )
parser.add_argument('--do_train' ,action='store_true' ,help='Whether to run training.' )
parser.add_argument('--do_eval' ,action='store_true' ,help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' ,default=__lowercase ,type=__lowercase ,required=__lowercase ,help='The output directory where the model predictions and checkpoints will be written.' ,)
parser.add_argument('--train_dataset' ,type=__lowercase ,default='' )
parser.add_argument('--eval_dataset' ,type=__lowercase ,default='' )
parser.add_argument('--seed' ,type=__lowercase ,default=42 )
parser.add_argument('--num_train_epochs' ,type=__lowercase ,default=3 )
parser.add_argument('--train_batch_size' ,type=__lowercase ,default=8 )
parser.add_argument('--eval_batch_size' ,type=__lowercase ,default=16 )
parser.add_argument('--adam_epsilon' ,default=1e-8 ,type=__lowercase ,help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' ,type=__lowercase ,default=1 )
parser.add_argument(
'--max_steps' ,default=-1 ,type=__lowercase ,help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) ,)
parser.add_argument(
'--gradient_accumulation_steps' ,type=__lowercase ,default=1 ,help='Number of updates steps to accumulate before performing a backward/update pass.' ,)
parser.add_argument('--learning_rate' ,type=__lowercase ,default=6.2_5e-5 )
parser.add_argument('--warmup_steps' ,default=0 ,type=__lowercase ,help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' ,type=__lowercase ,default='warmup_linear' )
parser.add_argument('--weight_decay' ,type=__lowercase ,default=0.01 )
parser.add_argument('--lm_coef' ,type=__lowercase ,default=0.9 )
parser.add_argument('--n_valid' ,type=__lowercase ,default=3_74 )
parser.add_argument('--server_ip' ,type=__lowercase ,default='' ,help='Can be used for distant debugging.' )
parser.add_argument('--server_port' ,type=__lowercase ,default='' ,help='Can be used for distant debugging.' )
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
# Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
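
# A hypothetical end-to-end run on the ROCStories cloze data (file names are
# placeholders; the CSV layout is the one parsed by load_rocstories_dataset):
#   python run_openai_gpt.py --do_train --do_eval \
#       --train_dataset cloze_val.csv --eval_dataset cloze_test.csv \
#       --output_dir ./gpt_rocstories --train_batch_size 8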
| 558 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
@slow
def snake_case_ ( self ):
a_ : Dict = "bert-base-cased"
a_ : str = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
a_ : Optional[Any] = TFAutoModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def snake_case_ ( self ):
a_ : List[Any] = "bert-base-cased"
a_ : Dict = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
a_ : Tuple = TFAutoModelForPreTraining.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def snake_case_ ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : int = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
a_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained(a_ )
a_ , a_ : Tuple = TFAutoModelForCausalLM.from_pretrained(a_ , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def snake_case_ ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Dict = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
a_ : Any = TFAutoModelWithLMHead.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def snake_case_ ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : str = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
a_ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(a_ )
a_ , a_ : int = TFAutoModelForMaskedLM.from_pretrained(a_ , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def snake_case_ ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Union[str, Any] = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
a_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(a_ )
a_ , a_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(a_ , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def snake_case_ ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a_ : List[Any] = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
a_ : str = TFAutoModelForSequenceClassification.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def snake_case_ ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a_ : Dict = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
a_ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
@require_tensorflow_probability
def snake_case_ ( self ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
a_ : Any = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
a_ : Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(a_ )
a_ , a_ : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
a_ , output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def snake_case_ ( self ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
a_ : int = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(a_ , a_ )
a_ : List[str] = copy.deepcopy(model.config )
a_ : List[Any] = ["FunnelBaseModel"]
a_ : str = TFAutoModel.from_config(a_ )
self.assertIsInstance(a_ , a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a_ )
a_ : int = TFAutoModel.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def snake_case_ ( self ):
try:
AutoConfig.register("new-model" , a_ )
a_ : Optional[int] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(a_ ):
auto_class.register(a_ , a_ )
auto_class.register(a_ , a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
auto_class.register(a_ , a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
a_ : Optional[int] = BertModelTester(self ).get_config()
a_ : Any = NewModelConfig(**tiny_config.to_dict() )
a_ : List[str] = auto_class.from_config(a_ )
self.assertIsInstance(a_ , a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a_ )
a_ : str = auto_class.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
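
    # A minimal sketch of the registration flow exercised above (names as defined at
    # the top of this file; `AutoConfig.register` ties the "new-model" model type to
    # NewModelConfig, and each auto class then maps the config to an implementation):
    #
    #   AutoConfig.register("new-model", NewModelConfig)
    #   TFAutoModel.register(NewModelConfig, TFNewModel)
    #   model = TFAutoModel.from_config(NewModelConfig())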
def snake_case_ ( self ):
with self.assertRaisesRegex(
a_ , "bert-base is not a local folder and is not a valid model identifier" ):
a_ : Any = TFAutoModel.from_pretrained("bert-base" )
def snake_case_ ( self ):
with self.assertRaisesRegex(
a_ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
a_ : Optional[int] = TFAutoModel.from_pretrained(a_ , revision="aaaaaa" )
def snake_case_ ( self ):
with self.assertRaisesRegex(
a_ , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
a_ : List[Any] = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case_ ( self ):
with self.assertRaisesRegex(a_ , "Use `from_pt=True` to load this model" ):
a_ : Tuple = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def snake_case_ ( self ):
# Make sure we have cached the model.
a_ : Optional[int] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
a_ : Optional[Any] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
a_ : Any = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
a_ : List[str] = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
 | 370 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
        return 32
@property
    def time_input_dim(self):
        return 32
@property
    def block_out_channels_a(self):
        return self.time_input_dim
@property
    def time_embed_dim(self):
        return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self ):
torch.manual_seed(0 )
a_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self ):
a_ : Dict = self.dummy_text_encoder
a_ : Dict = self.dummy_tokenizer
a_ : Optional[int] = self.dummy_unet
a_ : Dict = self.dummy_movq
a_ : List[str] = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
a_ : List[Any] = DDIMScheduler(**a_ )
a_ : Union[str, Any] = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def snake_case_ ( self , a_ , a_=0 ):
a_ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a_ ) ).to(a_ )
a_ : Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a_ )
# create init_image
a_ : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(a_ ) ).to(a_ )
a_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a_ : int = Image.fromarray(np.uinta(a_ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
if str(a_ ).startswith("mps" ):
a_ : Any = torch.manual_seed(a_ )
else:
a_ : Any = torch.Generator(device=a_ ).manual_seed(a_ )
a_ : List[Any] = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def snake_case_ ( self ):
a_ : Optional[Any] = "cpu"
a_ : List[Any] = self.get_dummy_components()
a_ : Union[str, Any] = self.pipeline_class(**a_ )
a_ : Tuple = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
a_ : Union[str, Any] = pipe(**self.get_dummy_inputs(a_ ) )
a_ : Any = output.images
a_ : str = pipe(
**self.get_dummy_inputs(a_ ) , return_dict=a_ , )[0]
a_ : List[str] = image[0, -3:, -3:, -1]
a_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ : Optional[int] = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
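# A minimal usage sketch of the pipeline exercised above (illustrative only:
# the model ids and the float16 dtype mirror the slow integration test below,
# and `init_image` is assumed to be a PIL image prepared by the caller):
#
#   pipe_prior = KandinskyPriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 )
#   pipe = KandinskyImgaImgPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 )
#   image_embeds, negative_image_embeds = pipe_prior("A red cartoon frog, 4k" ).to_tuple()
#   image = pipe(
#       "A red cartoon frog, 4k", image=init_image, image_embeds=image_embeds,
#       negative_image_embeds=negative_image_embeds, strength=0.2 ).images[0]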
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def snake_case_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
        a_ : Optional[int] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy" )
        a_ : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
a_ : Optional[Any] = "A red cartoon frog, 4k"
a_ : int = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(a_ )
a_ : int = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
a_ : List[Any] = pipeline.to(a_ )
pipeline.set_progress_bar_config(disable=a_ )
a_ : int = torch.Generator(device="cpu" ).manual_seed(0 )
a_ , a_ : Optional[int] = pipe_prior(
a_ , generator=a_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
a_ : List[Any] = pipeline(
a_ , image=a_ , image_embeds=a_ , negative_image_embeds=a_ , generator=a_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , )
a_ : int = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(a_ , a_ )
| 370 | 1 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _lowerCAmelCase ( __magic_name__ : BertModel , __magic_name__ : str , __magic_name__ : str ) -> List[str]:
lowercase : Optional[Any] =('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
lowercase : Any =(
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(__snake_case ):
os.makedirs(__snake_case )
lowercase : Union[str, Any] =model.state_dict()
def to_tf_var_name(__magic_name__ : str ):
for patt, repl in iter(__snake_case ):
lowercase : Optional[Any] =name.replace(__snake_case , __snake_case )
return f'''bert/{name}'''
def create_tf_var(__magic_name__ : np.ndarray , __magic_name__ : str , __magic_name__ : tf.Session ):
lowercase : Any =tf.dtypes.as_dtype(tensor.dtype )
lowercase : str =tf.get_variable(dtype=__snake_case , shape=tensor.shape , name=__snake_case , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__snake_case )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
lowercase : Dict =to_tf_var_name(__snake_case )
lowercase : Optional[int] =state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
lowercase : int =torch_tensor.T
lowercase : Optional[int] =create_tf_var(tensor=__snake_case , name=__snake_case , session=__snake_case )
tf.keras.backend.set_value(__snake_case , __snake_case )
lowercase : Any =session.run(__snake_case )
print(f'''Successfully created {tf_name}: {np.allclose(__snake_case , __snake_case )}''' )
lowercase : List[str] =tf.train.Saver(tf.trainable_variables() )
saver.save(__snake_case , os.path.join(__snake_case , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def _lowerCAmelCase ( __magic_name__ : str=None ) -> Dict:
lowercase : int =argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=__snake_case , required=__snake_case , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=__snake_case , default=__snake_case , required=__snake_case , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=__snake_case , required=__snake_case , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=__snake_case , required=__snake_case , help='''Directory in which to save tensorflow model''' )
lowercase : List[str] =parser.parse_args(__snake_case )
lowercase : Tuple =BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__snake_case , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
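# Example invocation (the script file name and the paths are placeholders
# shown for illustration; the flags match the argparse definition above):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints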
| 92 |
def _A ( __snake_case :int , __snake_case :float , __snake_case :float ) -> float:
"""simple docstring"""
return round(float(moles / volume ) * nfactor )
def _A ( __snake_case :float , __snake_case :float , __snake_case :float ) -> float:
"""simple docstring"""
return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )
def _A ( __snake_case :float , __snake_case :float , __snake_case :float ) -> float:
"""simple docstring"""
return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )
def _A ( __snake_case :float , __snake_case :float , __snake_case :float ) -> float:
"""simple docstring"""
return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )
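# Quick sanity check of the ideal-gas relation PV = nRT with
# R = 0.0821 L*atm/(mol*K), which the second, third and fourth helpers above
# rearrange for pressure, volume and temperature respectively: 1 mol at 273 K
# in 22.4 L gives about 1 atm, since (1 * 0.0821 * 273) / 22.4 is roughly 1.0.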
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693 | 0 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
_lowerCAmelCase = prime_factors(SCREAMING_SNAKE_CASE )
if is_square_free(SCREAMING_SNAKE_CASE ):
return -1 if len(SCREAMING_SNAKE_CASE ) % 2 else 1
return 0
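# Illustrative values for the Mobius function implemented above:
#   mu(1) = 1    (empty factorization, an even number of prime factors)
#   mu(2) = -1   (a single prime factor)
#   mu(6) = 1    (6 = 2 * 3 is square-free with two prime factors)
#   mu(4) = 0    (4 = 2 * 2 is not square-free)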
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __snake_case ( SCREAMING_SNAKE_CASE: Optional[int] ):
"""simple docstring"""
_lowerCAmelCase = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def __snake_case ( SCREAMING_SNAKE_CASE: Any , SCREAMING_SNAKE_CASE: Tuple ):
"""simple docstring"""
_lowerCAmelCase = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
_lowerCAmelCase = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def __snake_case ( ):
"""simple docstring"""
_lowerCAmelCase = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __snake_case ( SCREAMING_SNAKE_CASE: List[Any] , SCREAMING_SNAKE_CASE: Optional[Any] , SCREAMING_SNAKE_CASE: List[str] , SCREAMING_SNAKE_CASE: str ):
"""simple docstring"""
_lowerCAmelCase = 'imagenet-1k-id2label.json'
_lowerCAmelCase = 1000
_lowerCAmelCase = 'huggingface/label-files'
_lowerCAmelCase = num_labels
_lowerCAmelCase = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) ) , 'r' ) )
_lowerCAmelCase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
_lowerCAmelCase = _lowerCAmelCase = CvtConfig(num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
_lowerCAmelCase = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
_lowerCAmelCase = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
_lowerCAmelCase = [2, 2, 20]
_lowerCAmelCase = [3, 12, 16]
_lowerCAmelCase = [192, 768, 1024]
_lowerCAmelCase = CvtForImageClassification(SCREAMING_SNAKE_CASE )
_lowerCAmelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
_lowerCAmelCase = image_size
_lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('cpu' ) )
_lowerCAmelCase = OrderedDict()
_lowerCAmelCase = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
_lowerCAmelCase = list_of_state_dict + cls_token(SCREAMING_SNAKE_CASE )
_lowerCAmelCase = list_of_state_dict + embeddings(SCREAMING_SNAKE_CASE )
for cnt in range(config.depth[idx] ):
_lowerCAmelCase = list_of_state_dict + attention(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowerCAmelCase = list_of_state_dict + final()
for gg in list_of_state_dict:
print(SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
_lowerCAmelCase = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_snake_case = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
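# Example invocation (the script file name and the checkpoint path are
# placeholders for illustration; the values shown are the argparse defaults):
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24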
| 491 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def UpperCAmelCase ( A : Optional[int] , A : str , A : str , A : int ):
'''simple docstring'''
_UpperCAmelCase = original_name.split('.' )[0]
_UpperCAmelCase = key.split('.' )
_UpperCAmelCase = int(key_list[key_list.index(A ) - 2] )
_UpperCAmelCase = int(key_list[key_list.index(A ) - 1] )
_UpperCAmelCase = orig_block_num - offset
_UpperCAmelCase = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' )
return key
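# Sketch of what the helper above does (the key below is illustrative):
#   replace_key_with_offset("network.0.0.mlp.fc1.weight", 0, "mlp.fc1", "output.conv1")
#   -> "network.block.0.0.output.conv1.weight"
# i.e. the "<block>.<layer>.<original_name>" fragment of the key is rewritten
# to "block.<block - offset>.<layer>.<new_name>".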
def UpperCAmelCase ( A : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase , _UpperCAmelCase = 0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
_UpperCAmelCase = key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
_UpperCAmelCase = key[: key.find('proj' )]
_UpperCAmelCase = key.replace(A , f'patch_embeddings.{total_embed_found}.' )
_UpperCAmelCase = key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
_UpperCAmelCase = 'poolformer.encoder.' + key
if "mlp.fc1" in key:
_UpperCAmelCase = replace_key_with_offset(A , A , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
_UpperCAmelCase = replace_key_with_offset(A , A , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
_UpperCAmelCase = replace_key_with_offset(A , A , 'norm1' , 'before_norm' )
if "norm2" in key:
_UpperCAmelCase = replace_key_with_offset(A , A , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
_UpperCAmelCase = replace_key_with_offset(A , A , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
_UpperCAmelCase = replace_key_with_offset(A , A , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
_UpperCAmelCase = key.replace('head' , 'classifier' )
_UpperCAmelCase = value
return new_state_dict
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(A , stream=A ).raw )
return image
@torch.no_grad()
def UpperCAmelCase ( A : str , A : List[Any] , A : int ):
'''simple docstring'''
_UpperCAmelCase = PoolFormerConfig()
# set attributes based on model_name
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = model_name[-3:]
_UpperCAmelCase = 1000
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = (1, 1000)
# set config attributes
_UpperCAmelCase = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(A ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
_UpperCAmelCase = [2, 2, 6, 2]
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s24":
_UpperCAmelCase = [4, 4, 12, 4]
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s36":
_UpperCAmelCase = [6, 6, 18, 6]
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.9
elif size == "m36":
_UpperCAmelCase = [6, 6, 18, 6]
_UpperCAmelCase = [96, 192, 384, 768]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
elif size == "m48":
_UpperCAmelCase = [8, 8, 24, 8]
_UpperCAmelCase = [96, 192, 384, 768]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
else:
raise ValueError(f'Size {size} not supported' )
# load image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=A )
# Prepare image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt' ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
_UpperCAmelCase = torch.load(A , map_location=torch.device('cpu' ) )
# rename keys
_UpperCAmelCase = rename_keys(A )
# create HuggingFace model and load state dict
_UpperCAmelCase = PoolFormerForImageClassification(A )
model.load_state_dict(A )
model.eval()
# Define image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=A )
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
_UpperCAmelCase = model(A )
_UpperCAmelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
_UpperCAmelCase = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
_UpperCAmelCase = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
_UpperCAmelCase = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
_UpperCAmelCase = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
_UpperCAmelCase = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , A , atol=1e-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowercase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
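# Example invocation (the script file name and the checkpoint path are
# placeholders for illustration):
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer-s12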
| 573 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase__ :
'''simple docstring'''
@staticmethod
def lowerCamelCase_ ( *snake_case , **snake_case ) -> str:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Union[str, Any]:
_UpperCAmelCase = ObjectDetectionPipeline(model=snake_case , image_processor=snake_case )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> List[Any]:
_UpperCAmelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 )
self.assertGreater(len(snake_case ) , 0 )
for detected_object in outputs:
self.assertEqual(
snake_case , {
'score': ANY(snake_case ),
'label': ANY(snake_case ),
'box': {'xmin': ANY(snake_case ), 'ymin': ANY(snake_case ), 'xmax': ANY(snake_case ), 'ymax': ANY(snake_case )},
} , )
import datasets
_UpperCAmelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
_UpperCAmelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
_UpperCAmelCase = object_detector(snake_case , threshold=0.0 )
self.assertEqual(len(snake_case ) , len(snake_case ) )
for outputs in batch_outputs:
self.assertGreater(len(snake_case ) , 0 )
for detected_object in outputs:
self.assertEqual(
snake_case , {
'score': ANY(snake_case ),
'label': ANY(snake_case ),
'box': {'xmin': ANY(snake_case ), 'ymin': ANY(snake_case ), 'xmax': ANY(snake_case ), 'ymax': ANY(snake_case )},
} , )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def lowerCamelCase_ ( self ) -> List[Any]:
pass
@require_torch
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
_UpperCAmelCase = AutoModelForObjectDetection.from_pretrained(snake_case )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(snake_case )
_UpperCAmelCase = ObjectDetectionPipeline(model=snake_case , feature_extractor=snake_case )
_UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] , )
_UpperCAmelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] , )
@require_torch
@slow
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = 'facebook/detr-resnet-50'
_UpperCAmelCase = AutoModelForObjectDetection.from_pretrained(snake_case )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(snake_case )
_UpperCAmelCase = ObjectDetectionPipeline(model=snake_case , feature_extractor=snake_case )
_UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
_UpperCAmelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = 'facebook/detr-resnet-50'
_UpperCAmelCase = pipeline('object-detection' , model=snake_case )
_UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
_UpperCAmelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = 0.9985
_UpperCAmelCase = 'facebook/detr-resnet-50'
_UpperCAmelCase = pipeline('object-detection' , model=snake_case )
_UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=snake_case )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = 'Narsil/layoutlmv3-finetuned-funsd'
_UpperCAmelCase = 0.9993
_UpperCAmelCase = pipeline('object-detection' , model=snake_case , threshold=snake_case )
_UpperCAmelCase = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , )
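# Outside of the test harness, the behavior checked above reduces to a few
# lines (a sketch; the model id mirrors the slow tests in this class):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.99)
#   # each entry has the form {"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}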
| 573 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCAmelCase = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_lowerCAmelCase = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_lowerCAmelCase = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def UpperCamelCase_ ( self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def UpperCamelCase_ ( self : int , _A : List[List[List[str]]] , _A : List[List[str]] , _A : int = 1 , _A : int = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_A , hypotheses=_A , min_len=_A , max_len=_A )
}
| 71 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ ( enum.Enum ):
UpperCAmelCase = 0
UpperCAmelCase = 1
UpperCAmelCase = 2
@add_end_docstrings(__lowercase )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Tuple , *_A : List[str] , **_A : str ):
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_UpperCamelCase = None
if self.model.config.prefix is not None:
_UpperCamelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_UpperCamelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params )
_UpperCamelCase = {**self._preprocess_params, **preprocess_params}
_UpperCamelCase = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ):
_UpperCamelCase = {}
if prefix is not None:
_UpperCamelCase = prefix
if prefix:
_UpperCamelCase = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_UpperCamelCase = handle_long_generation
preprocess_params.update(_A )
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.TENSORS
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[str] , _A : str , **_A : Any ):
return super().__call__(_A , **_A )
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ):
_UpperCamelCase = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prompt_text
if handle_long_generation == "hole":
_UpperCamelCase = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_UpperCamelCase = generate_kwargs['''max_new_tokens''']
else:
_UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_UpperCamelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_UpperCamelCase = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ):
_UpperCamelCase = model_inputs['''input_ids''']
_UpperCamelCase = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = 1
else:
_UpperCamelCase = input_ids.shape[0]
_UpperCamelCase = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_UpperCamelCase = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_UpperCamelCase = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
_UpperCamelCase = generated_sequence.shape[0]
if self.framework == "pt":
_UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ):
_UpperCamelCase = model_outputs['''generated_sequence'''][0]
_UpperCamelCase = model_outputs['''input_ids''']
_UpperCamelCase = model_outputs['''prompt_text''']
_UpperCamelCase = generated_sequence.numpy().tolist()
_UpperCamelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_UpperCamelCase = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_UpperCamelCase = 0
else:
_UpperCamelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
_UpperCamelCase = prompt_text + text[prompt_length:]
else:
_UpperCamelCase = text[prompt_length:]
_UpperCamelCase = {'''generated_text''': all_text}
records.append(_A )
return records
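# Minimal usage sketch for the pipeline implemented above (the model id is
# illustrative; any causal LM checkpoint works):
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Once upon a time", max_new_tokens=20, return_full_text=False)
#   # -> [{"generated_text": "..."}]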
| 71 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCAmelCase: List[Any] ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class lowerCamelCase__ ( unittest.TestCase , __UpperCamelCase ):
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = load_tool("""text-question-answering""" )
self.tool.setup()
lowercase : Union[str, Any] = load_tool("""text-question-answering""" , remote=snake_case )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.tool(snake_case , """What did Hugging Face do in April 2021?""" )
self.assertEqual(snake_case , """launched the BigScience Research Workshop""" )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
lowercase : Optional[Any] = self.remote_tool(snake_case , """What did Hugging Face do in April 2021?""" )
self.assertEqual(snake_case , """launched the BigScience Research Workshop""" )
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
lowercase : List[str] = self.tool(text=snake_case , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(snake_case , """launched the BigScience Research Workshop""" )
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
lowercase : List[str] = self.remote_tool(text=snake_case , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(snake_case , """launched the BigScience Research Workshop""" )
| 607 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def __snake_case ( ) -> None:
print("""Making key files...""" )
make_key_files("""rsa""" ,1024 )
print("""Key files generation successful.""" )
def __snake_case ( __A ) -> tuple[tuple[int, int], tuple[int, int]]:
print("""Generating prime p...""" )
lowercase : int = rabinMiller.generate_large_prime(__A )
print("""Generating prime q...""" )
lowercase : Optional[int] = rabinMiller.generate_large_prime(__A )
lowercase : Optional[int] = p * q
print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
while True:
lowercase : List[str] = random.randrange(2 ** (key_size - 1) ,2 ** (key_size) )
if cryptoMath.gcd(__A ,(p - 1) * (q - 1) ) == 1:
break
print("""Calculating d that is mod inverse of e...""" )
lowercase : List[Any] = cryptoMath.find_mod_inverse(__A ,(p - 1) * (q - 1) )
lowercase : Any = (n, e)
lowercase : Optional[Any] = (n, d)
return (public_key, private_key)
def __snake_case ( __A ,__A ) -> None:
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print("""\nWARNING:""" )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
lowercase , lowercase : Optional[int] = generate_key(__A )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' ,"""w""" ) as out_file:
out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' ,"""w""" ) as out_file:
out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' )
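# Sketch of how the generated pair is used for textbook RSA (not part of this
# module; `message` must be an integer smaller than n):
#   (n, e), (n, d) = generate_key(1024)
#   ciphertext = pow(message, e, n)           # encrypt with the public exponent
#   assert pow(ciphertext, d, n) == message   # decrypt with the private exponent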
if __name__ == "__main__":
main()
| 607 | 1 |
"""simple docstring"""
UpperCamelCase__ = '''Input must be a string of 8 numbers plus letter'''
UpperCamelCase__ = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def UpperCAmelCase ( snake_case : Optional[Any] ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_lowerCAmelCase:str = F'Expected string as input, found {type(__SCREAMING_SNAKE_CASE ).__name__}'
raise TypeError(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase:Union[str, Any] = spanish_id.replace('''-''' , '''''' ).upper()
if len(__SCREAMING_SNAKE_CASE ) != 9:
raise ValueError(__SCREAMING_SNAKE_CASE )
try:
_lowerCAmelCase:int = int(spanish_id_clean[0:8] )
_lowerCAmelCase:Any = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__SCREAMING_SNAKE_CASE ) from ex
if letter.isdigit():
raise ValueError(__SCREAMING_SNAKE_CASE )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
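
# Worked example (made-up ID, not a real person's): 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so "12345678Z" validates while "12345678T" does not.
def _demo_checks() -> None:
    assert is_spain_national_id("12345678Z")
    assert is_spain_national_id("12345678-z")  # hyphen stripped, case normalized
    assert not is_spain_national_id("12345678T")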
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCAmelCase ( ):
_lowerCAmelCase:Optional[int] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=snake_case , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=snake_case , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=snake_case )
return parser.parse_args()
def UpperCAmelCase ( ):
_lowerCAmelCase:int = parse_args()
# Import training_script as a module.
_lowerCAmelCase:Optional[int] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCAmelCase:Optional[int] = script_fpath.stem
_lowerCAmelCase:Any = importlib.import_module(snake_case )
# Patch sys.argv
_lowerCAmelCase:Dict = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
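
# Example invocation, assuming this file is saved as xla_spawn.py next to a training
# script that defines _mp_fn (script name and flags below are hypothetical):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train
#
# Everything after the script path is forwarded untouched, plus an extra
# `--tpu_num_cores 8` flag appended by the launcher above.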
'''simple docstring'''
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path prefix of the fairseq checkpoint (one file per expert rank plus a shared file).",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
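
# For reference, the index written by shard_on_the_fly follows the standard
# Transformers sharded-checkpoint layout. Schematically (shard names and
# parameter keys below are illustrative, not taken from a real run):
#
#     {
#         "metadata": {"total_size": ...},   # sum over tensors of numel * bytes-per-element
#         "weight_map": {
#             "decoder.layers.3.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
#             "shared.weight": "pytorch_model-00129-of-00129.bin"
#         }
#     }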
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
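
# Usage sketch (checkpoint id and image path are illustrative, not guaranteed):
#
#     from PIL import Image
#     from transformers import ChineseCLIPProcessor
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫"], images=Image.open("cat.jpg"), return_tensors="pt", padding=True)
#     # -> input_ids / attention_mask from the tokenizer plus pixel_values from the image processor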
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
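
# Outside the test harness, the composite tokenizer can be driven directly; a sketch
# using the same checkpoint as the slow test above (call passthrough assumed):
#
#     from transformers import RagTokenizer
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     batch = tokenizer(["who got the first nobel prize in physics"], return_tensors="pt")
#     # -> question-encoder input_ids / attention_mask, ready for a RAG model's generate()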
| 703 | """simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
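
# A typical way to exercise this script under DeepSpeed (config file name and
# threshold value are hypothetical):
#
#   accelerate launch --config_file ds_zero2_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased \
#       --num_epochs 3 \
#       --performance_lower_bound 0.80 \
#       --output_dir ./results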
'''simple docstring'''
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
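
# Outside the test suite, the same pipeline is one call away. A sketch with a
# public checkpoint (model id and clip path are illustrative):
#
#     from transformers import pipeline
#
#     classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#     predictions = classifier("archery.mp4", top_k=2)  # local path or URL to a short clip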
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
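
# With the lazy module in place, the classes listed in _import_structure are only
# materialized on first access. A usage sketch (checkpoint name for illustration):
#
#     from transformers import XmodModel  # resolved lazily through _LazyModule
#
#     model = XmodModel.from_pretrained("facebook/xmod-base")
#     model.set_default_language("en_XX")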
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowercase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowercase : Any = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
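
# Worked example for downscale_height_and_width above. With the default
# scale_factor of 8, each latent cell covers 8**2 = 64 pixels per side:
# 768 % 64 == 0 gives 768 // 64 = 12 cells, i.e. a 96x96 latent grid, while
# 500 is rounded up to 8 cells per side:
#
#     downscale_height_and_width(768, 768, 8)  # -> (96, 96)
#     downscale_height_and_width(500, 500, 8)  # -> (64, 64)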
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str, lowerCamelCase : int )-> None:
lowerCamelCase__ : str =value
lowerCamelCase__ : Node | None =None
lowerCamelCase__ : Node | None =None
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : int, lowerCamelCase : Node )-> None:
lowerCamelCase__ : Any =tree
def snake_case ( self : str, lowerCamelCase : Node | None )-> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Dict )-> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
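
# Quick illustration: summing a three-node tree with the DFS above.
def _demo_tree_sum() -> None:
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    assert next(iter(BinaryTreeNodeSum(root))) == 12  # 10 + 5 - 3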
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
__A : Optional[int] = 'src/transformers'
# Matches is_xxx_available()
__A : Union[str, Any] = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__A : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__A : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
__A : int = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
__A : Optional[int] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__A : Optional[Any] = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
__A : Union[str, Any] = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
__A : int = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
__A : Union[str, Any] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
__A : Dict = re.compile(R'^\s*try:')
# Catches a line with else:
__A : Optional[Any] = re.compile(R'^\s*else:')
def snake_case__ ( _lowerCamelCase ) ->Dict:
"""simple docstring"""
if _re_test_backend.search(_lowerCamelCase ) is None:
return None
__lowercase : Any = [b[0] for b in _re_backend.findall(_lowerCamelCase )]
backends.sort()
return "_and_".join(_lowerCamelCase )
def snake_case__ ( _lowerCamelCase ) ->List[Any]:
"""simple docstring"""
with open(_lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
__lowercase : Any = f.readlines()
__lowercase : List[Any] = 0
while line_index < len(_lowerCamelCase ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_lowerCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase : Optional[Any] = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
__lowercase : Tuple = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_lowerCamelCase ):
__lowercase : Union[str, Any] = _re_one_line_import_struct.search(_lowerCamelCase ).groups()[0]
__lowercase : List[Any] = re.findall("\[([^\]]+)\]", _lowerCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
__lowercase : List[str] = _re_import_struct_key_value.search(_lowerCamelCase )
if single_line_import_search is not None:
__lowercase : Union[str, Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(_lowerCamelCase ) > 0]
objects.extend(_lowerCamelCase )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
__lowercase : Optional[Any] = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase : Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
__lowercase : Optional[int] = lines[line_index]
if _re_import_struct_add_one.search(_lowerCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(_lowerCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(_lowerCamelCase ) is not None:
__lowercase : int = _re_import_struct_add_many.search(_lowerCamelCase ).groups()[0].split(", " )
__lowercase : List[str] = [obj[1:-1] for obj in imports if len(_lowerCamelCase ) > 0]
objects.extend(_lowerCamelCase )
elif _re_between_brackets.search(_lowerCamelCase ) is not None:
__lowercase : Dict = _re_between_brackets.search(_lowerCamelCase ).groups()[0].split(", " )
__lowercase : Optional[int] = [obj[1:-1] for obj in imports if len(_lowerCamelCase ) > 0]
objects.extend(_lowerCamelCase )
elif _re_quote_object.search(_lowerCamelCase ) is not None:
objects.append(_re_quote_object.search(_lowerCamelCase ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
__lowercase : int = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase : Dict = []
while (
line_index < len(_lowerCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
__lowercase : List[Any] = lines[line_index]
__lowercase : int = _re_import.search(_lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase : Optional[int] = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(_lowerCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
__lowercase : Union[str, Any] = lines[line_index]
__lowercase : Dict = _re_import.search(_lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase : Optional[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->Union[str, Any]:
"""simple docstring"""
def find_duplicates(_lowerCamelCase ):
return [k for k, v in collections.Counter(_lowerCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase : Dict = []
for key in import_dict_objects.keys():
__lowercase : Dict = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
__lowercase : int = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase : Union[str, Any] = "base imports" if key == "none" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def snake_case__ ( ) ->Optional[Any]:
"""simple docstring"""
__lowercase : str = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
__lowercase : Union[str, Any] = os.path.join(_lowerCamelCase, "__init__.py" )
__lowercase : Any = parse_init(_lowerCamelCase )
if objects is not None:
__lowercase : Tuple = analyze_results(*_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
__lowercase : Union[str, Any] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("\n".join(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
raise ValueError("\n\n".join(_lowerCamelCase ) )
def snake_case__ ( ) ->Any:
"""simple docstring"""
__lowercase : List[str] = []
for path, directories, files in os.walk(_lowerCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(_lowerCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_lowerCamelCase ) / folder).glob("*.py" ) ) ) == 0:
continue
__lowercase : Union[str, Any] = str((Path(_lowerCamelCase ) / folder).relative_to(_lowerCamelCase ) )
__lowercase : Any = short_path.replace(os.path.sep, "." )
submodules.append(_lowerCamelCase )
for fname in files:
if fname == "__init__.py":
continue
__lowercase : Optional[int] = str((Path(_lowerCamelCase ) / fname).relative_to(_lowerCamelCase ) )
__lowercase : Union[str, Any] = short_path.replace(".py", "" ).replace(os.path.sep, "." )
if len(submodule.split("." ) ) == 1:
submodules.append(_lowerCamelCase )
return submodules
__A : Optional[Any] = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def snake_case__ ( ) ->List[str]:
"""simple docstring"""
__lowercase : Optional[Any] = importlib.util.spec_from_file_location(
"transformers", os.path.join(_lowerCamelCase, "__init__.py" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
__lowercase : Optional[Any] = spec.loader.load_module()
__lowercase : Any = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_lowerCamelCase ) > 0:
__lowercase : str = "\n".join(F'- {module}' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F'{list_of_modules}\n'
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__A : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__A : Optional[int] = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
__A : Any = '|'.join(sys.argv[1:])
__A : Optional[int] = re.compile(RF"""^({joined_dirs}).*?\.py$""")
__A : Tuple = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
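
# Run from the repository root; sample output is hypothetical:
#
#   $ python ./utils/get_modified_files.py utils src tests examples
#   src/transformers/foo.py tests/test_foo.py
#
# Note the missing trailing newline (end=""), which keeps the output safe to
# splice directly into Makefile command lines.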
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit, zero-padded, uppercase hex form.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
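
# Round-trip sanity check ("Hello World!" is 48656C6C6F20576F726C6421 in uppercase hex).
def _demo_round_trip() -> None:
    encoded = base16_encode(b"Hello World!")
    assert encoded == "48656C6C6F20576F726C6421"
    assert base16_decode(encoded) == b"Hello World!"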
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class snake_case :
"""simple docstring"""
def __init__( self : str , __A : List[str] , __A : Optional[Any]=1_3 , __A : Any=2 , __A : List[Any]=2_4 , __A : List[str]=1_6 , __A : Tuple=True , __A : int=True , __A : Tuple=3_2 , __A : int=5 , __A : Dict=4 , __A : Any=3_7 , __A : Optional[Any]="gelu" , __A : List[Any]=0.1 , __A : str=0.1 , __A : Dict=1_0 , __A : Any=0.02 , __A : Optional[Any]=None , __A : Dict=2 , __A : Optional[int]=2 , ):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = patch_size
__UpperCamelCase = max_length
__UpperCamelCase = num_mel_bins
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = frequency_stride
__UpperCamelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__UpperCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__UpperCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1
__UpperCamelCase = frequency_out_dimension * time_out_dimension
__UpperCamelCase = num_patches + 2
def _lowerCamelCase ( self : int ):
__UpperCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = self.get_config()
return config, input_values, labels
def _lowerCamelCase ( self : str ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _lowerCamelCase ( self : List[str] , __A : str , __A : Dict , __A : Union[str, Any] ):
__UpperCamelCase = ASTModel(config=__A )
model.to(__A )
model.eval()
__UpperCamelCase = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = config_and_inputs
__UpperCamelCase = {'input_values': input_values}
return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on some audio from the hub (note: the repo id below
# keeps the original, intentionally misspelled "spectogram" dataset name)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 434 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
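        # Worked example (a sketch using the defaults above): at 16 kHz, a 25 ms
        # window is sample_size = 25 * 16000 // 1000 = 400 samples, a 10 ms hop is
        # sample_stride = 160, optimal_fft_length rounds 400 up to the next power
        # of two (n_fft = 512), and n_freqs = 512 // 2 + 1 = 257 frequency bins.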
    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log", )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x
    def normalize(self, input_features, attention_mask=None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
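# Minimal usage sketch (hypothetical values; the class name above is an assumption
# recovered from the deprecated M-CTC-T feature extractor this file mirrors):
#   extractor = MCTCTFeatureExtractor()
#   feats = extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
#   feats["input_features"][0].shape  # -> (num_frames, 80) log-mel features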
| 346 | '''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 251 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 1 |
import argparse
JS_FILE = "docs/source/_static/js/custom.js"
def update_custom_js(version: str):
    """Update the version table in the custom.js docs file."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
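# Usage sketch (hypothetical version number): running
#   python update_custom_js.py --version 4.30.0
# rewrites `const stableVersion` and appends a "v4.30.0" entry to the version map.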
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 283 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
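        # Worked example (a sketch using the defaults above): embed_dim=96 and
        # depths=[2, 2, 6, 2] give 4 stages, so hidden_size = int(96 * 2 ** 3) = 768.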
| 678 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin, as in the original benchmark
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin, as in the original benchmark
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES)

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
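# The JSON report written above maps each benchmark name to its duration in
# seconds, e.g. (a sketch, values are illustrative):
#   {"num examples": 500000, "map identity": 12.3, "filter": 4.5, ...}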
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter() | 234 | from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
) | 234 | 1 |
def to_upper_case(word: str) -> str:
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 576 | import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
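    # Note on the check above (a sketch of the reasoning): GPT-2-style residual
    # projections are initialized with std = initializer_range / sqrt(2 * num_layers),
    # e.g. 0.02 / sqrt(2 * 5) ≈ 0.0063 with this tester's defaults.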
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True, )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 576 | 1 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
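# Why these multipliers (a short derivation): for a 4-digit n, the concatenated
# product of n with (1, 2) is n followed by 2n, i.e. n * 10**5 + 2 * n = 100002 * n.
# For a 3-digit n, concatenating n, 2n and 3n gives
# n * 10**6 + 2 * n * 10**3 + 3 * n = 1002003 * n, so scanning n downwards
# returns the largest 9-digit pandigital concatenated product first.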
if __name__ == "__main__":
print(f"""{solution() = }""")
| 42 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 42 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id, )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
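# Usage sketch (hypothetical paths):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-base --push_to_hub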
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 532 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 532 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
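# Worked example (a sketch): for model_name == "mobilenet_v1_0.75_192" the regex
# above yields depth_multiplier == 0.75 and image_size == 192; the config carries
# 1001 labels because the TF checkpoints reserve index 0 for "background".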
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size}, size={"shortest_edge": config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
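# Illustrative CLI invocation (script name and paths are placeholders, not real files):
#   python convert_mobilenet_v1_checkpoint.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt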
| 76 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 76 | 1 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5
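# Illustrative check with approximate, assumed values: water has a bulk modulus of
# roughly 2.15e9 Pa and a density of about 1000 kg/m^3, so
#   speed_of_sound_in_a_fluid(density=1000, bulk_modulus=2.15e9)  # ≈ 1466 m/s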
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
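# Bit trick illustrated: a power of two has a single set bit, so subtracting 1 flips
# every lower bit and the AND clears everything, e.g. 8 (0b1000) & 7 (0b0111) == 0.
# Note that 0 also passes this check, which callers may want to exclude separately.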
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 104 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 104 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1
    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data
    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")
    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)
    return get_distrib(root)[0]
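# Worked example under the conventions above (hypothetical tree): a root holding 3
# coins with two empty leaves needs one coin pushed down each edge, i.e. 2 moves:
#   distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))  # -> 2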
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 6 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arga):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga
        bs, arga = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arga], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
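# Minimal usage sketch of the decorator outside tests (names here are illustrative):
#
#   @find_executable_batch_size(starting_batch_size=256)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size`; on CUDA OOM the wrapper halves it and retries
#   train()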
| 290 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _UpperCamelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, do_rescale=True, rescale_factor=1 / 255, crop_size=None, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
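# Illustrative usage of the processor above (the obfuscated class name is kept from
# the source; `pil_image` is a placeholder PIL.Image):
#   processor = _UpperCamelCase()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224]) with the defaults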
| 290 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)
    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 509 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ], )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 3_1227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
            'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False, )
| 509 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], )
    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output), [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ], )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5, )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5, )
    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 249 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='DPT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas')
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas').to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 249 | 1 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 406 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
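# Expected shape of the printed output (scores are illustrative, not exact): a list of
# (filled_sentence, probability, token) triples, e.g.
#   [("Le camembert est délicieux :)", 0.49, " délicieux"), ...]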
| 500 | 0 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
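# Example of the URL shape asserted above (computed by hand; quote() percent-encodes blanks):
#   hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2")
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"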
| 710 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
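# Illustrative call on synthetic data (note the TF1-style API: tf.Session,
# tf.placeholder and tf.sub require TensorFlow 1.x):
#   centroids, assignments = TFKMeansCluster(
#       [[1.0, 1.0], [1.2, 0.8], [9.0, 9.0], [9.2, 8.8]], 2)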
| 653 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune this branch if the current path already overshoots max_sum, or if
    # even taking every remaining number could not reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
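# With the demo inputs above, the solver finds the two subsets of
# [3, 34, 4, 12, 5, 2] that sum to 9, so this prints:
#   [3, 4, 2] [4, 5]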
| 658 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Pass two points to get the vector from them in the form (x, y, z)."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Cross product of two 3d vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check if vector equals (0, 0, 0) up to `accuracy` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
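# Quick sanity check:
#   are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))  # True: all on the line x = y = z
#   are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))  # False: cross product is (0, 0, 1)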
| 410 | 0 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # integer division here; `n / 2` would silently turn n into a float
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
# by Fermat's little theorem, b**(p - 2) is the modular inverse of b mod p
# (the float division a / b is exact here because a is a multiple of b)
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
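# Sanity check with small numbers: 3**13 % 7 == 3, so
#   binary_exponentiation(3, 13, 7) == pow(3, 13, 7) == 3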
| 89 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximates the arc length of `fnc` between x_start and x_end."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
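# The loop above is a piecewise-linear estimate of the arc length integral
#   L = integral of sqrt(1 + f'(x)**2) dx from x_start to x_end
# each math.hypot(dx, df) term is the length of one chord of the curve.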
| 698 | 0 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt` in `filename`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as doc links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally fix) the auto-generated model list in one task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
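# Typical invocations, from the root of the repo:
#   python utils/check_task_guides.py                      # check only
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite stale lists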
| 705 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
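# Rough shape of the sampling loop this scheduler is built for (an
# illustrative sketch; `model`, `sample`, and the conditioning details are
# hypothetical and depend on the pipeline driving the scheduler):
#
#   scheduler.set_timesteps(num_inference_steps)
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = model(sample_hat, sigma_hat)
#       output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:
#           model_output = model(output.prev_sample, sigma_prev)
#           output = scheduler.step_correct(
#               model_output, sigma_hat, sigma_prev, sample_hat,
#               output.prev_sample, output.derivative,
#           )
#       sample = output.prev_sample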
| 31 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
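# For reference, the answer to Project Euler problem 46 -- the smallest odd
# composite that cannot be written as the sum of a prime and twice a square --
# is 5777, so this should print `solution() = 5777`.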
| 520 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
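# Example invocation (hypothetical paths; only --checkpoint_path and
# --dump_path are required):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned.ckpt \
#       --dump_path ./stable-diffusion-v1-5 \
#       --half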
| 640 | 0 |
"""simple docstring"""
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a_ = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 717 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
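# Note on make_linear_from_emb: the returned nn.Linear reuses emb.weight.data,
# so the LM head is tied to the shared embedding matrix rather than getting a
# freshly initialized projection.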
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 621 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""abeja/gpt-neox-japanese-2.7b""": 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )

        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : str ,__lowerCamelCase : Any="\n" ):
'''simple docstring'''
a = []
a = []
a = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__lowerCamelCase ) > 0:
words.append(bytearray(__lowerCamelCase ).decode('''utf-8''' ,errors='''replace''' ) )
a = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(__lowerCamelCase )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
words.append(bytearray(__lowerCamelCase ).decode('''utf-8''' ,errors='''replace''' ) )
a = ''''''.join(__lowerCamelCase )
return text
| 387 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        expected_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, expected_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 387 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan over array[left:right]; used below for tiny ranges."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted array."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
else:
print('''Not found''')
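# Both variants use O(log N) comparisons (splitting in thirds rather than
# halves); the `precision` cutoff hands ranges of fewer than 10 elements to
# the plain linear scan, avoiding recursion overhead on tiny subarrays.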
| 149 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 149 | 1 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s") | 436 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of given magnitude and direction into x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check if a system of forces is in static equilibrium (net moment ~ 0)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod() | 436 | 1 |
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 112 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 112 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str =["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] =["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Try to extend a partial tour from `pos`; `curr` is the number of squares visited so far."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every starting square."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
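    # Usage sketch: a 5x5 board admits an open knight's tour, so open_knight_tour(5)
    # returns a matrix whose entries 1..25 trace the path; open_knight_tour(2)
    # raises ValueError because no tour exists on a 2x2 board.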
| 430 | 0 |
import numpy as np
def runge_kutta(f, ya, xa, x_end, h) -> np.ndarray:
    """Classic fourth-order Runge-Kutta: integrate y' = f(x, y) with y(xa) = ya up to x_end in steps of h."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
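    # Usage sketch: integrate y' = y from x = 0 to 1 with y(0) = 1; the final
    # grid value should approximate e ≈ 2.718281828.
    approx = runge_kutta(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
    print(approx[-1])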
| 717 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, with coefficients in ascending order of degree, at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (one multiply-add per coefficient)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
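    # With ascending-order coefficients, poly(10) = 5*10**2 + 9.3*10**3 + 7*10**4
    # = 79800.0, so both evaluation strategies below should print 79800.0.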
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 372 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the `<command> [<args>]` text from the usage line."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
 | 113 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D: ").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
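    # Sanity check of the mapping rules above (worked out by hand): with key
    # "Goodbye!!", remove_duplicates gives "GODBYE", encipher("Hello World!!", ...)
    # yields "CYJJM VMQJB!!", and decipher maps it back to "HELLO WORLD!!".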
| 568 | 0 |
"""simple docstring"""
import numpy as np
class IndexCalculation:
    """Compute common vegetation indices from per-band pixel matrices (numpy arrays)."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def a ( self : Tuple , _lowercase : List[str]="" , _lowercase : Union[str, Any]=None , _lowercase : List[Any]=None , _lowercase : Optional[int]=None , _lowercase : Optional[int]=None , _lowercase : Dict=None ):
self.set_matricies(red=__UpperCamelCase , green=__UpperCamelCase , blue=__UpperCamelCase , red_edge=__UpperCamelCase , nir=__UpperCamelCase )
__UpperCAmelCase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
    def arv12(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        """Adjusted transformed soil-adjusted vegetation index."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        """Ideal Vegetation Index: (nir - b) / (a * red)."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified Soil Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Shape Index (IF)."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Simple ratio nir / red (kept exactly as in the original)."""
        return self.nir / self.red

    def tvi(self):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
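

# Minimal usage sketch with hypothetical 2x2 band matrices:
#   cl = IndexCalculation(red=np.array([[1.0, 2.0], [3.0, 4.0]]),
#                         nir=np.array([[2.0, 4.0], [6.0, 8.0]]))
#   cl.calculation("NDVI")  # (nir - red) / (nir + red) == 1/3 for every pixel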
| 719 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(MODEL_NAME)

    def test_token2json(self):
        expected_json = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
        sequence = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 397 | 0 |
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
'''simple docstring'''
A__ , A__ : Optional[int] = self.get_vision_text_model(snake_case_ , snake_case_ )
A__ : int = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
A__ : Tuple = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
'''simple docstring'''
A__ , A__ : List[str] = self.get_vision_text_model(snake_case_ , snake_case_ )
A__ : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
A__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ )
A__ : List[Any] = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
'''simple docstring'''
A__ , A__ : List[Any] = self.get_vision_text_model(snake_case_ , snake_case_ )
A__ : Tuple = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
A__ : List[Any] = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
A__ : Optional[Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
A__ : Dict = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ )
A__ : Dict = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
A__ : List[Any] = after_output[0].numpy()
A__ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1E-5 )
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
'''simple docstring'''
A__ , A__ : Any = self.get_vision_text_model(snake_case_ , snake_case_ )
A__ : Any = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
A__ : Optional[int] = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
A__ : Tuple = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A__ : Union[str, Any] = to_atuple(vision_model.config.image_size )
A__ : Optional[int] = to_atuple(vision_model.config.patch_size )
A__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A__ : List[str] = num_patches + 1
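        # For example, with the tiny tester defaults (assumed here: image_size=30,
        # patch_size=2) this gives seq_len = (30 // 2) * (30 // 2) + 1 = 226.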
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A__ : Any = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)
@slow
    def test_real_model_save_load_from_pretrained(self):
        model_a, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_a(**inputs)
        out_a = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname)
            model_a = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_a(**inputs)
            out_a_loaded = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_a_loaded - out_a))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
A__ : List[Any] = 13
A__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A__ : Any = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A__ : Tuple = random_attention_mask([batch_size, 4] )
A__ : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , **snake_case_ ):
'''simple docstring'''
A__ , A__ : List[str] = self.get_vision_text_model(snake_case_ , snake_case_ )
A__ : List[str] = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
A__ : Any = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
A__ : Any = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A__ : List[str] = to_atuple(vision_model.config.image_size )
A__ : str = to_atuple(vision_model.config.patch_size )
A__ : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
A__ : Dict = TFDeiTModel(snake_case_ , name="""vision_model""" )
A__ : Any = TFRobertaModel(snake_case_ , name="""text_model""" )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
A__ : Dict = 13
A__ : List[str] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
A__ : List[str] = random_attention_mask([batch_size, 4] )
A__ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
A__ : str = TFCLIPVisionModel(snake_case_ , name="""vision_model""" )
A__ : Any = TFBertModel(snake_case_ , name="""text_model""" )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=snake_case_ )
A__ : List[str] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
A__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A__ : List[str] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=snake_case_ , padding=snake_case_ , return_tensors="""np""" )
A__ : Any = model(**snake_case_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A__ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , snake_case_ , atol=1E-3 ) )
| 363 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
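    # With the default conv_stride above this evaluates to 5 * 2**6 = 320, i.e. one
    # output frame per 320 input samples (20 ms of audio at a 16 kHz sampling rate).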
| 363 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 504 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that level;
    otherwise fall back to the module default.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # sends output to sys.stderr by default
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        explicit_format = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(explicit_format)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Same as ``logger.warning()``, but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Same as ``logger.warning()``, but only emits a given call once per process (via lru_cache)."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm that does nothing when progress bars are disabled."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty no-op function for any attribute lookup."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
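

# Typical in-library usage of the helpers defined above:
#   logger = get_logger(__name__)
#   set_verbosity_info()
#   logger.warning_once("emitted a single time per process")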
| 504 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_flax_weights_only(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_flax(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=snake_case__ )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 5_0
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(snake_case__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(snake_case__ )
SCREAMING_SNAKE_CASE = jax.random.split(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = shard(snake_case__ )
SCREAMING_SNAKE_CASE = pipeline(snake_case__ , snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1E-3
assert np.abs((np.abs(snake_case__ , dtype=np.floataa ).sum() - 2_383_808.2) ) < 5E-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=snake_case__ )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 5_0
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(snake_case__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(snake_case__ )
SCREAMING_SNAKE_CASE = jax.random.split(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = shard(snake_case__ )
SCREAMING_SNAKE_CASE = pipeline(snake_case__ , snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(snake_case__ , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 5_0
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(snake_case__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(snake_case__ )
SCREAMING_SNAKE_CASE = jax.random.split(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = shard(snake_case__ )
SCREAMING_SNAKE_CASE = pipeline(snake_case__ , snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(snake_case__ , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=snake_case__ , steps_offset=1 , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=snake_case__ , safety_checker=snake_case__ , )
SCREAMING_SNAKE_CASE = scheduler.create_state()
SCREAMING_SNAKE_CASE = scheduler_state
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 5_0
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(snake_case__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(snake_case__ )
SCREAMING_SNAKE_CASE = jax.random.split(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = shard(snake_case__ )
SCREAMING_SNAKE_CASE = pipeline(snake_case__ , snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1E-3
assert np.abs((np.abs(snake_case__ , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5E-1
    def test_jax_memory_efficient_attention(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0 ) , snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=snake_case__ , )
SCREAMING_SNAKE_CASE = replicate(snake_case__ )
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = shard(snake_case__ )
SCREAMING_SNAKE_CASE = pipeline(snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=snake_case__ , use_memory_efficient_attention=snake_case__ , )
SCREAMING_SNAKE_CASE = replicate(snake_case__ )
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = shard(snake_case__ )
SCREAMING_SNAKE_CASE = pipeline(snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 439 | 0 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
        args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
        self.assertIn("optimize_list_casting", kwargs)
        self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
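

# All writer tests below produce the same two-row table; expected_num_chunks equals
# num_examples only when writer_batch_size == 1 flushes one record batch per write.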
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
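# How the two helpers above compose (a small self-check sketch):
# get_base_dtype unwraps nested list types down to the primitive dtype, and
# change_first_primitive_element_in_list rewrites the innermost first item.
def _demo_helpers():
    nested = [[[1, 2, 3]]]
    change_first_primitive_element_in_list(nested, 99)
    assert nested == [[[99, 2, 3]]]
    assert get_base_dtype(pa.array(nested).type) == pa.int64()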
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
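# The "not in range" branch relies on these dtype bounds: one past the max of
# the optimized dtype no longer fits, which forces the int64 fallback.
def _demo_int_bounds():
    assert np.iinfo(np.int8).max + 1 == 128      # attention_mask & co. overflow here
    assert np.iinfo(np.int32).max + 1 == 2**31   # input_ids overflow here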
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
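# End-to-end sketch of the writer exercised throughout this file, runnable
# outside of pytest (assumes `datasets` and `pyarrow` are installed):
if __name__ == "__main__":
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as demo_writer:
        demo_writer.write({"col_1": "foo", "col_2": 1})
        demo_writer.write({"col_1": "bar", "col_2": 2})
        n_examples, n_bytes = demo_writer.finalize()
    demo_table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
    print(n_examples, n_bytes, demo_table.to_pydict())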
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( lowercase , lowercase , lowercase ):
UpperCamelCase : Any = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50_257, n_positions: int = 1_024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn)
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(self, input_ids=None, input_embeds=None, device=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
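# Minimal smoke-test sketch for the decoder above. The shapes are illustrative
# (batch 2, prefix length 77, GPT-2 width 768), not taken from any pipeline;
# with prefix_inner_dim == n_embd the prefix MLPs reduce to identities. Run it
# from a context where this module's relative imports resolve:
#
#     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#     input_ids = torch.randint(0, 50_257, (2, 10))
#     prefix_embeds = torch.randn(2, 77, 768)
#     out = decoder(input_ids, prefix_embeds)
#     print(out.logits.shape)  # torch.Size([2, 87, 50257]): 77 prefix + 10 text positions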
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
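def _zip_rename_demo():
    # Sketch of the zip-rename pattern used above: `zip` pairs source and
    # target state-dict keys purely by registration order, so it is only valid
    # when both modules register identically-shaped parameters in the same order.
    from torch import nn

    src, dst = nn.Linear(4, 4), nn.Linear(4, 4)
    mapping = dict(zip(src.state_dict().keys(), dst.state_dict().keys()))
    dst.load_state_dict({mapping[k]: v for k, v in src.state_dict().items()})
    assert torch.equal(dst.weight, src.weight)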
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
    value_function()
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
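def _rename_keys_demo():
    # Quick check of the rename rules above on a single original key
    # (hypothetical zero weight; only the key transformation matters here):
    sample = OrderedDict({"backbone.block1.0.attn.q.weight": 0})
    assert list(rename_keys(sample).keys()) == [
        "segformer.encoder.block.0.0.attention.self.query.weight"
    ]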
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
A : Dict = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 516 | 1 |
'''simple docstring'''


def add(first: int, second: int) -> int:
    # Ripple-carry addition with bitwise operators: AND finds the carry bits,
    # XOR adds without carry, and the left shift moves the carry into place.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
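# Worked trace of add(5, 3), one loop iteration per line
# (carry = first & second; first ^= second; second = carry << 1):
#   first=5 (0b101), second=3 (0b011) -> carry=1, first=6, second=2
#   first=6 (0b110), second=2 (0b010) -> carry=2, first=4, second=4
#   first=4 (0b100), second=4 (0b100) -> carry=4, first=0, second=8
#   first=0,         second=8 (0b1000) -> carry=0, first=8, second=0  => returns 8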
'''simple docstring'''
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
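# Typical invocation sketch for the processor above (illustrative values;
# downloads weights on first use, and with apply_ocr=False the boxes must be
# supplied on the 0-1000 normalized scale expected by the tokenizer):
#
#     from transformers import LayoutLMv3Processor
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#     encoding = processor(
#         image,                                        # a PIL.Image
#         ["hello", "world"],                           # words
#         boxes=[[10, 10, 50, 20], [60, 10, 110, 20]],  # one box per word
#         return_tensors="pt",
#     )
#     # encoding now holds input_ids, attention_mask, bbox and pixel_values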
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
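def _qkv_split_demo():
    # Shape sketch of the fused-projection split above: the checkpoint stores
    # q, k and v stacked along dim 0 of one (3 * hidden_size, hidden_size) matrix.
    hidden_size = 8
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)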
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")
# forward pass
torch.manual_seed(2 )
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
# verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
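def _tied_head_demo():
    # The head built by make_linear_from_emb shares storage with the embedding
    # matrix, which is what ties the input and output embeddings together.
    emb = nn.Embedding(10, 4)
    head = make_linear_from_emb(emb)
    assert head.weight.data_ptr() == emb.weight.data_ptr()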
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__lowerCamelCase : int = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
def UpperCamelCase__ ( self : Any ) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_UpperCamelCase =self.dist_env.copy()
_UpperCamelCase =mp_dtype
with mockenv_context(**UpperCamelCase__ ):
_UpperCamelCase =Accelerator()
if mp_dtype == "fp16":
_UpperCamelCase =torch.floataa
elif mp_dtype == "bf16":
_UpperCamelCase =torch.bfloataa
_UpperCamelCase =MixedPrecision(param_dtype=UpperCamelCase__ , reduce_dtype=UpperCamelCase__ , buffer_dtype=UpperCamelCase__ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , UpperCamelCase__ )
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler , UpperCamelCase__ ) )
                elif mp_dtype == BF16:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(UpperCamelCase__ )
def UpperCamelCase__ ( self : Dict ) -> Tuple:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_UpperCamelCase =self.dist_env.copy()
_UpperCamelCase =str(UpperCamelCase__ ).lower()
with mockenv_context(**UpperCamelCase__ ):
_UpperCamelCase =FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=UpperCamelCase__ ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
def UpperCamelCase__ ( self : Union[str, Any] ) -> List[str]:
super().setUp()
_UpperCamelCase =0.82
_UpperCamelCase =[
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
_UpperCamelCase ={
'''multi_gpu_fp16''': 3200,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2000,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_UpperCamelCase =160
_UpperCamelCase =160
_UpperCamelCase =inspect.getfile(accelerate.test_utils )
_UpperCamelCase =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def UpperCamelCase__ ( self : int ) -> str:
_UpperCamelCase =os.path.join(self.test_scripts_folder , '''test_performance.py''' )
_UpperCamelCase =['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
_UpperCamelCase =cmd.copy()
for i, strategy in enumerate(UpperCamelCase__ ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
def UpperCamelCase__ ( self : Optional[Any] ) -> Any:
_UpperCamelCase =os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
_UpperCamelCase =[
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(UpperCamelCase__ ):
_UpperCamelCase =cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
_UpperCamelCase =len(UpperCamelCase__ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_UpperCamelCase =cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
_UpperCamelCase =cmd_config[:-1]
_UpperCamelCase =os.path.join(self.tmpdir , '''epoch_0''' )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
def UpperCamelCase__ ( self : Dict ) -> int:
_UpperCamelCase =os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
_UpperCamelCase =[
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_UpperCamelCase =cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(UpperCamelCase__ ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
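# Illustrative sketch (hedged): ``FullyShardedDataParallelPlugin`` reads its
# configuration from environment variables at construction time, which is what
# ``mockenv_context`` exploits in the tests above. ``ACCELERATE_USE_FSDP`` is
# visible in ``dist_env``; the ``FSDP_*`` names below follow accelerate's FSDP
# integration and may differ across versions.
if __name__ == "__main__":
    import os

    os.environ["ACCELERATE_USE_FSDP"] = "true"
    os.environ["FSDP_SHARDING_STRATEGY"] = "1"  # 1 == FULL_SHARD
    os.environ["FSDP_STATE_DICT_TYPE"] = "FULL_STATE_DICT"
    fsdp_plugin = FullyShardedDataParallelPlugin()
    print(fsdp_plugin.sharding_strategy, fsdp_plugin.state_dict_type)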
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    # Smallest odd composite that cannot be written as the sum of a prime and
    # twice a square (Goldbach's other conjecture)
    return compute_nums(1)[0]
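# Hypothetical helper (named here for illustration, not part of the original
# file): exhibit one prime + 2*k**2 decomposition of an odd number, or None.
# ``solution()`` returns the first odd composite for which this is None.
def find_decomposition(number: int) -> tuple[int, int] | None:
    k = 0
    while 2 * k * k <= number:
        if is_prime(number - 2 * k * k):
            return (number - 2 * k * k, k)
        k += 1
    return None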
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCAmelCase = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
__UpperCAmelCase = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
__UpperCAmelCase = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
def UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
def UpperCAmelCase ( self : Tuple , a_ : Optional[Any] , a_ : Optional[Any] , a_ : Any=None ) -> Union[str, Any]:
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(a_ , a_ , sample_weight=a_ ) ),
        }
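# Quick standalone check (illustrative): the metric above is a thin wrapper
# around sklearn, so Example 1 from the docstring can be reproduced directly.
if __name__ == "__main__":
    preds = [1, 2, 2, 0, 3, 3]
    refs = [1, 3, 2, 0, 3, 2]
    print(round(matthews_corrcoef(refs, preds), 2))  # 0.54, as in Example 1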
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase__ : Dict = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCamelCase__ : Any = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __UpperCamelCase( ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
UpperCAmelCase__ : Dict = bs[:]
UpperCAmelCase__ : Tuple = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_A )
cs.append(2**8 + n )
n += 1
UpperCAmelCase__ : Optional[int] = [chr(_A ) for n in cs]
return dict(zip(_A , _A ) )
def __UpperCamelCase( _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = set()
UpperCAmelCase__ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ : int = char
return pairs
class _lowercase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase_ : int = VOCAB_FILES_NAMES
UpperCAmelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : int = ['''input_ids''', '''attention_mask''']
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_="replace" ,lowerCamelCase_="<s>" ,lowerCamelCase_="</s>" ,lowerCamelCase_="</s>" ,lowerCamelCase_="<s>" ,lowerCamelCase_="<unk>" ,lowerCamelCase_="<pad>" ,lowerCamelCase_="<mask>" ,lowerCamelCase_=False ,**lowerCamelCase_ ,) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase__ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase__ : Any = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase__ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase__ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase__ : List[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase__ : List[str] = json.load(lowerCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : Any = errors # how to handle errors in decoding
UpperCAmelCase__ : Optional[int] = bytes_to_unicode()
UpperCAmelCase__ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding='''utf-8''' ) as merges_handle:
UpperCAmelCase__ : Any = merges_handle.read().split('''\n''' )[1:-1]
UpperCAmelCase__ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ : Any = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ : Optional[Any] = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Any = tuple(lowerCamelCase_ )
UpperCAmelCase__ : Optional[Any] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCAmelCase__ : List[Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : Any = bigram
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : str = 0
while i < len(lowerCamelCase_ ):
try:
UpperCAmelCase__ : Optional[Any] = word.index(lowerCamelCase_ ,lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ : Any = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ : List[Any] = tuple(lowerCamelCase_ )
UpperCAmelCase__ : Dict = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCAmelCase__ : Union[str, Any] = get_pairs(lowerCamelCase_ )
UpperCAmelCase__ : Optional[Any] = ''' '''.join(lowerCamelCase_ )
UpperCAmelCase__ : Optional[Any] = word
return word
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(''' ''' ) )
return bpe_tokens
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> str:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ''''''.join(lowerCamelCase_ )
UpperCAmelCase__ : Any = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' ,errors=self.errors )
return text
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ : Optional[int] = os.path.join(
lowerCamelCase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase__ : Optional[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase_ ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + '''\n''' )
UpperCAmelCase__ : str = 0
with open(lowerCamelCase_ ,'''w''' ,encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
UpperCAmelCase__ : str = token_index
writer.write(''' '''.join(lowerCamelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
UpperCAmelCase__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase__ : Dict = [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=False ,**lowerCamelCase_ ) -> Any:
'''simple docstring'''
UpperCAmelCase__ : int = kwargs.pop('''add_prefix_space''' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase__ : Any = ''' ''' + text
return (text, kwargs)
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,) -> dict:
'''simple docstring'''
UpperCAmelCase__ : str = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase__ : Dict = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase__ : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase__ : List[str] = len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase__ : List[str] = len(lowerCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase__ : Union[str, Any] = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase__ : Optional[int] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
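# Hedged usage sketch: the class above is LED's slow tokenizer (published as
# ``transformers.LEDTokenizer``); loading the checkpoint named in the vocab
# maps above requires network access.
if __name__ == "__main__":
    from transformers import LEDTokenizer

    tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    enc = tok("LED is built for very long documents.")
    print(enc["input_ids"][:8])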
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCamelCase( _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __UpperCamelCase( _A : Union[str, Any] , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __UpperCamelCase( _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') )
return token
def __UpperCamelCase( ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __UpperCamelCase( _A : List[Any] , _A : Dict , _A : str , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = '''imagenet-1k-id2label.json'''
UpperCAmelCase__ : str = 10_00
UpperCAmelCase__ : str = '''huggingface/label-files'''
UpperCAmelCase__ : List[Any] = num_labels
UpperCAmelCase__ : Tuple = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type='''dataset''' ) ) , '''r''' ) )
    UpperCAmelCase__ : Any = {int(_A ): v for k, v in id2label.items()}
    UpperCAmelCase__ : List[str] = id2label
    UpperCAmelCase__ : Any = {v: k for k, v in id2label.items()}
    UpperCAmelCase__ : int = CvtConfig(num_labels=_A , id2label=_A , label2id=_A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
UpperCAmelCase__ : Tuple = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
UpperCAmelCase__ : List[str] = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
UpperCAmelCase__ : Optional[int] = [2, 2, 20]
UpperCAmelCase__ : str = [3, 12, 16]
UpperCAmelCase__ : Union[str, Any] = [1_92, 7_68, 10_24]
UpperCAmelCase__ : Optional[int] = CvtForImageClassification(_A )
UpperCAmelCase__ : Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
UpperCAmelCase__ : Dict = image_size
UpperCAmelCase__ : Union[str, Any] = torch.load(_A , map_location=torch.device('''cpu''' ) )
UpperCAmelCase__ : Union[str, Any] = OrderedDict()
UpperCAmelCase__ : str = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
UpperCAmelCase__ : Optional[int] = list_of_state_dict + cls_token(_A )
UpperCAmelCase__ : Union[str, Any] = list_of_state_dict + embeddings(_A )
for cnt in range(config.depth[idx] ):
UpperCAmelCase__ : str = list_of_state_dict + attention(_A , _A )
UpperCAmelCase__ : int = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_A )
for i in range(len(_A ) ):
UpperCAmelCase__ : List[Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase__ : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
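# Example invocation (illustrative: the checkpoint must first be downloaded
# from the OneDrive link above, and the script filename is the one this
# converter carries in the transformers repo):
#
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384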
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase, model.state_dict() )
lowercase__ = os.path.join(lowerCamelCase, '''index.json''' )
self.assertTrue(os.path.isfile(lowerCamelCase ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
lowercase__ = os.path.join(lowerCamelCase, F"""{key}.dat""" )
self.assertTrue(os.path.isfile(lowerCamelCase ) )
# TODO: add tests on the fact weights are properly loaded
def lowercase__ ( self : Tuple ):
'''simple docstring'''
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
for dtype in dtypes:
lowercase__ = torch.randn(2, 3, dtype=lowerCamelCase )
with TemporaryDirectory() as tmp_dir:
lowercase__ = offload_weight(lowerCamelCase, '''weight''', lowerCamelCase, {} )
lowercase__ = os.path.join(lowerCamelCase, '''weight.dat''' )
self.assertTrue(os.path.isfile(lowerCamelCase ) )
self.assertDictEqual(lowerCamelCase, {'''weight''': {'''shape''': [2, 3], '''dtype''': str(lowerCamelCase ).split('''.''' )[1]}} )
lowercase__ = load_offloaded_weight(lowerCamelCase, index['''weight'''] )
self.assertTrue(torch.equal(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = ModelForTest()
lowercase__ = model.state_dict()
lowercase__ = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
lowercase__ = {k: v for k, v in state_dict.items() if '''linear2''' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase, lowerCamelCase )
lowercase__ = OffloadedWeightsLoader(state_dict=lowerCamelCase, save_folder=lowerCamelCase )
# Every key is there with the right value
self.assertEqual(sorted(lowerCamelCase ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCamelCase, weight_map[key] ) )
lowercase__ = {k: v for k, v in state_dict.items() if '''weight''' in k}
lowercase__ = {k: v for k, v in state_dict.items() if '''weight''' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase, lowerCamelCase )
lowercase__ = OffloadedWeightsLoader(state_dict=lowerCamelCase, save_folder=lowerCamelCase )
# Every key is there with the right value
self.assertEqual(sorted(lowerCamelCase ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCamelCase, weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase, lowerCamelCase )
# Duplicates are removed
lowercase__ = OffloadedWeightsLoader(state_dict=lowerCamelCase, save_folder=lowerCamelCase )
# Every key is there with the right value
self.assertEqual(sorted(lowerCamelCase ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCamelCase, weight_map[key] ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
lowercase__ = extract_submodules_state_dict(lowerCamelCase, ['''a.1''', '''a.2'''] )
self.assertDictEqual(lowerCamelCase, {'''a.1''': 0, '''a.2''': 2} )
lowercase__ = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
lowercase__ = extract_submodules_state_dict(lowerCamelCase, ['''a.1''', '''a.2'''] )
self.assertDictEqual(lowerCamelCase, {'''a.1.a''': 0, '''a.2.a''': 2} )
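# Minimal standalone sketch of the round-trip the tests above cover: weights
# are memory-mapped to ``.dat`` files on disk (with an ``index.json``) and
# read back lazily on key access. Hedged: the exact on-disk layout is an
# accelerate implementation detail.
if __name__ == "__main__":
    model = ModelForTest()
    with TemporaryDirectory() as tmp_dir:
        offload_state_dict(tmp_dir, model.state_dict())
        loader = OffloadedWeightsLoader(save_folder=tmp_dir)
        weight = loader["linear1.weight"]  # loaded from disk on access
        assert torch.equal(weight, model.state_dict()["linear1.weight"])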
def get_data(source_data: list) -> list:
    # Transpose the rows: build one list of floats per column/criterion
    data_lists = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list, weights: list) -> list:
    score_lists = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score = []
        # for weight 0, score is 1 - normalized value (lower is better)
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list) -> list:
    final_scores = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list, weights: list) -> list:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
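# Worked example (illustrative data): column 0 is a cost criterion (weight 0,
# lower is better), column 1 a benefit criterion (weight 1, higher is better).
# Expected aggregate scores: 1.75, 0.0 and 1.5.
if __name__ == "__main__":
    rows = [["10", "4.5"], ["20", "3.0"], ["15", "5.0"]]
    for row in procentual_proximity(rows, [0, 1]):
        print(row)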
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCAmelCase__: List[str] = None
lowerCAmelCase__: List[Any] = logging.get_logger(__name__)
lowerCAmelCase__: str = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase__: Dict = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
lowerCAmelCase__: List[str] = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
lowerCAmelCase__: Dict = "▁"
class snake_case_ ( lowerCAmelCase ):
__lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
__lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[int] = AlbertTokenizer
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[MASK]" , **__lowerCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ : List[str] = (
AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
else mask_token
)
super().__init__(
__lowerCAmelCase , tokenizer_file=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , **__lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ : Dict = do_lower_case
SCREAMING_SNAKE_CASE_ : Optional[Any] = remove_space
SCREAMING_SNAKE_CASE_ : Dict = keep_accents
SCREAMING_SNAKE_CASE_ : int = vocab_file
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False if not self.vocab_file else True
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
__lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ):
copyfile(self.vocab_file , __lowerCAmelCase )
return (out_vocab_file,)
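# Hedged usage sketch: the class above is published as
# ``transformers.AlbertTokenizerFast`` (SentencePiece-backed, with "▁" as the
# word-boundary marker); the checkpoint download needs network access.
if __name__ == "__main__":
    from transformers import AlbertTokenizerFast

    tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    print(tok.tokenize("Hello world"))  # e.g. ['▁hello', '▁world']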
def is_sum_subset(arr: list, required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # For each prefix of arr, a sum of zero (0) can be formed by not taking
    # any element, hence True
    for i in range(arr_len + 1):
        subset[i][0] = True

    # A non-zero sum cannot be formed from an empty set, hence False
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
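    # Illustrative checks for the DP above: 2 + 4 + 8 reaches 14, while no
    # subset of the even inputs can reach the odd target 5.
    assert is_sum_subset([2, 4, 6, 8], 14)
    assert not is_sum_subset([2, 4, 6, 8], 5)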
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCAmelCase ( __a : Any ) -> Dict:
"""simple docstring"""
_a : Tuple = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def __UpperCAmelCase ( __a : Union[str, Any] ) -> str:
"""simple docstring"""
_a : Tuple = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_a : Any = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __UpperCAmelCase ( __a : List[Any] ,__a : int ,__a : Any ,__a : List[str] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = '''imagenet-1k-id2label.json'''
_a : Union[str, Any] = 1_000
_a : str = '''huggingface/label-files'''
_a : Union[str, Any] = num_labels
_a : Optional[Any] = json.load(open(cached_download(hf_hub_url(__a ,__a ,repo_type='''dataset''' ) ) ,'''r''' ) )
    _a : int = {int(__a ): v for k, v in id2label.items()}
    _a : Tuple = id2label
    _a : Union[str, Any] = {v: k for k, v in id2label.items()}
    _a : Any = CvtConfig(num_labels=__a ,id2label=__a ,label2id=__a )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' ,1 )[-1][4:6] == "13":
_a : Optional[Any] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' ,1 )[-1][4:6] == "21":
_a : Any = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
_a : Dict = [2, 2, 20]
_a : List[Any] = [3, 12, 16]
_a : List[str] = [192, 768, 1_024]
_a : List[str] = CvtForImageClassification(__a )
_a : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
_a : List[Any] = image_size
_a : str = torch.load(__a ,map_location=torch.device('''cpu''' ) )
_a : List[Any] = OrderedDict()
_a : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
_a : Any = list_of_state_dict + cls_token(__a )
_a : List[str] = list_of_state_dict + embeddings(__a )
for cnt in range(config.depth[idx] ):
_a : str = list_of_state_dict + attention(__a ,__a )
_a : Union[str, Any] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__a )
for i in range(len(__a ) ):
_a : Dict = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 494 | 0 |
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed  # Place the cars
        i += randint(1, max_speed * 2) if random_frequency else frequency  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
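    # Minimal usage sketch (illustrative parameters, not from the original module):
    # build a 20-cell highway with a car every 4 cells at speed 2, then run
    # three Nagel-Schreckenberg update steps and print each state.
    demo_highway = construct_highway(20, frequency=4, initial_speed=2)
    for state in simulate(demo_highway, number_of_update=3, probability=0.1, max_speed=5):
        print(state)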
| 721 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def test() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 171 | 0 |
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase Base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 (hex) string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
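    # Round-trip sketch: encoding then decoding returns the original bytes.
    assert base16_decode(base16_encode(b"Hello World!")) == b"Hello World!"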
| 527 |
"""Count the ways a number can be written as a sum of distinct n-th powers, via backtracking."""
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
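    # Example: 100 has three representations as a sum of distinct squares,
    # 10^2, 6^2 + 8^2, and 1^2 + 3^2 + 4^2 + 5^2 + 7^2, so solve(100, 2) == 3.
    print(solve(100, 2))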
| 527 | 1 |
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
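# Minimal usage sketch (assumed to mirror ``datasets.Dataset.from_generator``):
#
#   def gen():
#       yield {"text": "Good", "label": 0}
#       yield {"text": "Bad", "label": 1}
#
#   ds = GeneratorDatasetInputStream(generator=gen).read()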
| 377 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __lowerCAmelCase ( self : Optional[int] , **lowerCAmelCase_ : List[str] ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Any:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_a = '''</s>'''
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(lowerCAmelCase_ ) , 11_03 )
def __lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
_a = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_a = self.tokenizer_class.from_pretrained(self.tmpdirname )
_a = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
_a = rust_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ).input_ids[0]
_a = py_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ).input_ids[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_a = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_a = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
_a = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
_a = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ ).input_ids[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_a = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
_a = '''To ensure a smooth flow of bank resolutions.'''
_a = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
_a = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ ).input_ids[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_a = ['''This is going to be way too long.''' * 1_50, '''short example''']
_a = ['''not super long but more than 5 tokens''', '''tiny''']
_a = self._large_tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
_a = self._large_tokenizer(
text_target=lowerCAmelCase_ , max_length=5 , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCAmelCase_ ) == 2 # input_ids, attention_mask.
@slow
def __lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_a = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __lowerCAmelCase ( self : Optional[int] , **lowerCAmelCase_ : Tuple ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_a = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_a = self.tokenizer_class.from_pretrained(self.tmpdirname )
_a = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
_a = rust_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ).input_ids[0]
_a = py_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ).input_ids[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_a = ['''This is going to be way too long.''' * 10_00, '''short example''']
_a = ['''not super long but more than 5 tokens''', '''tiny''']
_a = self._large_tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
_a = self._large_tokenizer(
text_target=lowerCAmelCase_ , max_length=5 , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCAmelCase_ ) == 2 # input_ids, attention_mask.
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
_a = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
_a = self._large_tokenizer(lowerCAmelCase_ ).input_ids
self.assertListEqual(
lowerCAmelCase_ , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 377 | 1 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams of size ``ngram_size`` from ``sentence``."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
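# Example: character bigrams.
#   >>> create_ngram("vector", 2)
#   ['ve', 'ec', 'ct', 'to', 'or']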
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 32 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Any:
_lowercase : str = 10
_lowercase : List[str] = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_lowercase : Union[str, Any] = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(lowerCamelCase_ ) ),
} , features=lowerCamelCase_ , )
return dataset
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : int = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE : str = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'file.txt'
_lowercase : List[str] = FILE_CONTENT
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
    import bz2
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
    with bz2.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_lowercase : Optional[int] = bytes(lowerCamelCase_ , 'utf-8' )
with gzip.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
    with lz4.frame.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
    with py7zr.SevenZipFile(lowerCamelCase_ , 'w' ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
import tarfile
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
import lzma
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_lowercase : int = bytes(lowerCamelCase_ , 'utf-8' )
with lzma.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
import zipfile
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_lowercase : Dict = bytes(lowerCamelCase_ , 'utf-8' )
with zstd.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
_lowercase : Optional[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
SCREAMING_SNAKE_CASE : Optional[Any] = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE : Tuple = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
SCREAMING_SNAKE_CASE : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[int] = datasets.Dataset.from_dict(lowerCamelCase_ )
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(lowerCamelCase_ ) ) as con:
_lowercase : Union[str, Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : Tuple = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : str = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
    import bz2
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCamelCase_ , 'rb' ) as f:
_lowercase : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_lowercase : Optional[Any] = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(lowerCamelCase_ , 'wb' ) as f:
_lowercase : List[str] = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ )
_lowercase : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase_ ) )] for k in DATA[0]} , schema=lowerCamelCase_ )
writer.write_table(lowerCamelCase_ )
writer.close()
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : List[Any] = {'data': DATA}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : Optional[Any] = {'data': DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
import gzip
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Optional[int] = ['0', '1', '2', '3']
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : str = ['0', '1', '2', '3']
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : List[Any] = ['0', '1', '2', '3']
_lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
_lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> int:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 89 | 0 |
"""O(n log n) longest increasing subsequence length, using a tail array and binary search."""
from __future__ import annotations


def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh candidate subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Larger than every tail so far: extend the longest subsequence.
            tail[length] = v[i]
            length += 1
        else:
            # Replace the ceiling of v[i] in the tail array.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
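    # Example: a longest increasing subsequence here is [2, 3, 7, 8, 10, 13].
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6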
| 700 |
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to torch.float16, in place or at save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
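# Example invocation via python-fire (hypothetical paths):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model_fp16.bin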
| 6 | 0 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Record the exit order of a depth-first search starting at ``vert``."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from ``vert`` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS exit order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
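if __name__ == "__main__":
    # Illustrative check: ``test_graph_2`` splits into the strongly connected
    # components {0, 1, 2} and {3, 4, 5}.
    print(strongly_connected_components(test_graph_2))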
| 611 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
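# Usage sketch (illustrative sizes, not a released checkpoint):
#   config = RobertaPreLayerNormConfig(vocab_size=1_000, hidden_size=64,
#                                      num_hidden_layers=2, num_attention_heads=2)
#   onnx_config = RobertaPreLayerNormOnnxConfig(config, task="default")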
| 611 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
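# Shape sketch (hypothetical hyperparameters): token ids of shape
# (batch, seq_len) are encoded to activations of shape (batch, seq_len, d_model).
#   enc = SpectrogramNotesEncoder(max_length=2048, vocab_size=1536, d_model=768,
#                                 dropout_rate=0.1, num_layers=12, num_heads=12,
#                                 d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu")
#   encoded, mask = enc(tokens, tokens > 0)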
| 458 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128_112,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
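# Usage sketch (illustrative reduced sizes, not the released 54B configuration):
#   config = NllbMoeConfig(d_model=256, encoder_layers=2, decoder_layers=2,
#                          num_experts=4, expert_capacity=8)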
| 458 | 1 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[int] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase ( self, lowerCamelCase=0) -> str:
"""simple docstring"""
_lowercase : Optional[int] = np.random.RandomState(lowerCamelCase)
_lowercase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : Tuple = pipe(**lowerCamelCase).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[str] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Any = pipe(**lowerCamelCase).images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Any = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = self.get_dummy_inputs()
_lowercase : Any = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
_lowercase : int = self.get_dummy_inputs()
_lowercase : Union[str, Any] = 3 * [inputs.pop('prompt')]
_lowercase : Union[str, Any] = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
        text_inputs = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds
# forward
_lowercase : Union[str, Any] = pipe(**lowerCamelCase)
_lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = self.get_dummy_inputs()
_lowercase : Any = 3 * ['this is a negative prompt']
_lowercase : str = negative_prompt
_lowercase : Optional[int] = 3 * [inputs['prompt']]
# forward
_lowercase : int = pipe(**lowerCamelCase)
_lowercase : str = output.images[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : str = 3 * [inputs.pop('prompt')]
_lowercase : Optional[int] = []
for p in [prompt, negative_prompt]:
_lowercase : Tuple = pipe.tokenizer(
lowerCamelCase, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='np', )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
# forward
_lowercase : Dict = pipe(**lowerCamelCase)
_lowercase : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # uses the default PNDM scheduler shipped with the checkpoint
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
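
# Illustrative usage sketch (addition, not part of the original test module):
# drives the same ONNX pipeline directly. The checkpoint id and provider mirror
# the tests above; the prompt and step count are arbitrary choices.
if __name__ == "__main__":
    demo_pipe = OnnxStableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
    )
    demo_generator = np.random.RandomState(0)  # ONNX pipelines take a NumPy RandomState as generator
    demo_image = demo_pipe(
        "a photo of an astronaut", num_inference_steps=10, generator=demo_generator
    ).images[0]
    print("generated image size:", demo_image.size)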
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
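
# Illustrative sketch (addition, not part of the original file): with the lazy
# module installed in sys.modules, importing the config never pulls in torch,
# while the model classes are only materialized on first attribute access.
#
#   from transformers import GPTBigCodeConfig        # cheap: config only
#   from transformers import GPTBigCodeForCausalLM   # triggers the modeling import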
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self ):
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
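
# Illustrative inference sketch (addition, not part of the original test
# module): mirrors the integration test above outside unittest. The checkpoint
# id comes from the tests; the COCO image URL is an assumption.
if __name__ == "__main__":
    import requests

    demo_image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    demo_processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
    demo_model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
    with torch.no_grad():
        demo_logits = demo_model(**demo_processor(images=demo_image, return_tensors="pt")).logits
    print("predicted class id:", demo_logits.argmax(-1).item())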
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
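
# Illustrative usage sketch (addition, not part of the original module): the
# formatter is normally selected via Dataset.set_format("torch"), but it can be
# exercised directly on a pyarrow table. The column values are arbitrary.
if __name__ == "__main__":
    demo_table = pa.table({"x": [[1, 2], [3, 4]], "y": [0.5, 1.5]})
    demo_formatter = TorchFormatter()
    demo_batch = demo_formatter.format_batch(demo_table)
    print(demo_batch["x"].dtype, demo_batch["y"].dtype)  # torch.int64 torch.float32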
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
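
# Illustrative sketch (addition, not part of the original module): the
# tokenizer is character-level, so a word splits into single characters. The
# vocab path below is a stand-in; a real vocab.json ships with
# "alibaba-damo/mgp-str-base".
if __name__ == "__main__":
    demo_tokenizer = MgpstrTokenizer(vocab_file="vocab.json")  # hypothetical local path
    print(demo_tokenizer._tokenize("hello"))  # ['h', 'e', 'l', 'l', 'o']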
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
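
# Illustrative sketch (addition, not part of the original module): end-to-end
# flow of the tool, assuming PIL and the BLIP checkpoint are available. The
# image path is a stand-in.
if __name__ == "__main__":
    from PIL import Image as PILImage

    demo_tool = ImageCaptioningTool()
    print(demo_tool(PILImage.open("photo.jpg")))  # "photo.jpg" is a hypothetical path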
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
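
# Illustrative invocation sketch (addition, not part of the original script);
# the paths below are stand-ins for a local BERT checkpoint:
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./bert/pytorch_model.bin \
#       --config_file ./bert/config.json \
#       --tf_dump_path ./bert-tf_model.h5 \
#       --compare_with_pt_model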
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float32)  # dtype reconstructed; the original masked the precision
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
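
# Illustrative programmatic call (addition, not part of the original script):
# the converter only needs an object with .tf_model_dir and .output, so an
# argparse.Namespace works. The paths are stand-ins.
#
#   from argparse import Namespace
#   convert_tf_gptsan_to_pt(Namespace(tf_model_dir="./gptsan-tf-ckpt", output="./gptsan.pt"))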
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,  # goal coordinates, matching the Node signature
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
_a : List[Any] = (0, 0)
_a : Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
_a : int = GreedyBestFirst(init, goal)
_a : Optional[Any] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_a : List[Any] = 2
for elem in grid:
print(elem)
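
# Design note (addition): re-sorting open_nodes on every iteration makes each
# pop cost O(n log n); the standard-library heap gives O(log n) pushes and pops
# under the same Node.__lt__ ordering. A minimal sketch of the swap:
#
#   import heapq
#   heapq.heapify(self.open_nodes)                 # once, after seeding with start
#   current_node = heapq.heappop(self.open_nodes)  # instead of sort() + pop(0)
#   heapq.heappush(self.open_nodes, child_node)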
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
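
# Illustrative worked example (addition): for the term "data" appearing twice
# in a document, with 1 of 3 corpus documents containing it:
#   tf = 2, (df, n) = (1, 3), idf = round(log10(3 / 1), 3) = 0.477,
#   tf-idf = round(2 * 0.477, 3) = 0.954
if __name__ == "__main__":
    demo_corpus = "data science\nmachine learning\ndeep learning"
    demo_tf = term_frequency("data", "data is data")  # 2
    demo_df, demo_n = document_frequency("data", demo_corpus)  # (1, 3)
    print(tf_idf(demo_tf, inverse_document_frequency(demo_df, demo_n)))  # 0.954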
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file_abs
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file_rel


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
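
# Illustrative sketch (addition, not part of the original test module): the
# function under test resolves a URL or local path to a cached local file.
# The URL below is a stand-in.
#
#   from datasets.utils.file_utils import cached_path
#   local_path = cached_path("https://example.com/data.txt")  # downloads into the datasets cache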
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = MegatronBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
),
) = config_and_inputs
UpperCamelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp32 = True

    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
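

# A minimal, hypothetical helper (not in the original test file): the nested
# `math.isclose` loop above is equivalent to one vectorized comparison with
# `torch.allclose`, which is a real PyTorch API.
def _assert_slice_close(output, expected, rel_tol=1e-4, abs_tol=1e-4):
    expected_slice = torch.tensor(expected[:9], device=output.device).reshape(3, 3)
    assert torch.allclose(output[0, :3, :3].float(), expected_slice.float(), rtol=rel_tol, atol=abs_tol)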
| 185 | 1 |
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """
    This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )
        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
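

# A minimal usage sketch (hypothetical, not part of the original module): it shows
# how `get_value` walks a dotted key path through the nested config dictionary.
def _example_hf_deepspeed_config():
    ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
    hf_ds_config = HfDeepSpeedConfig(ds_config)
    assert hf_ds_config.get_value("zero_optimization.stage") == 3
    assert hf_ds_config.is_zero3()
    assert hf_ds_config.is_offload()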
class DeepSpeedEngineWrapper:
    """Internal wrapper around a deepspeed engine; `backward` also runs the engine's `step`."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Internal wrapper around a deepspeed optimizer."""

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Internal wrapper around a deepspeed scheduler."""

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Dummy optimizer placeholder, used when the optimizer is defined in the DeepSpeed config file."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Dummy scheduler placeholder, used when the scheduler is defined in the DeepSpeed config file."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
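

# A hedged usage sketch (the `model` argument is a hypothetical torch.nn.Module):
# when both optimizer and scheduler live in the DeepSpeed config file, these
# placeholders are handed to `accelerator.prepare` instead of real ones.
def _example_dummy_optim_and_scheduler(model):
    optimizer = DummyOptim(params=model.parameters(), lr=3e-4)
    scheduler = DummyScheduler(optimizer, total_num_steps=1000, warmup_num_steps=100)
    return optimizer, scheduler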
| 566 |
'''simple docstring'''
from typing import Any
def mode(input_list: list[Any]) -> list[Any]:
    """Return every value that occurs most often in ``input_list``, sorted ascending."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
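

# A small usage sketch (not in the original file): ties are all returned, in
# ascending order, and the empty list maps to an empty result.
assert mode([2, 3, 3, 5, 5]) == [3, 5]
assert mode([]) == []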
if __name__ == "__main__":
import doctest
doctest.testmod()
| 497 | 0 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no already-colored neighbour of the current vertex uses ``color``."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: try to color vertex ``index`` and everything after it."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid color per vertex, or [] when the graph is not ``max_colors``-colorable."""
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
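

# A small usage sketch (not in the original file): a triangle graph needs three
# colors; with only two the backtracker exhausts its options and returns [].
if __name__ == "__main__":
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    assert color(triangle, 2) == []  # two colors are not enough
    assert color(triangle, 3) == [0, 1, 2]  # the first valid assignment found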
| 708 |
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb ``number_of_steps`` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
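

# Worked example (illustrative, not from the original file): the step counts
# follow the Fibonacci recurrence, so there are 5 ways up 4 stairs:
# 1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2.
assert climb_stairs(4) == 5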
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Detect objects described by free-text candidate labels (e.g. with OWL-ViT checkpoints)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)

        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
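

# A hedged usage sketch (checkpoint, image URL and labels are illustrative; the
# `pipeline` factory itself is the standard Transformers entry point):
def _example_zero_shot_detection():
    from transformers import pipeline

    detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
    return detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )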
| 152 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
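

# A minimal usage sketch (not part of the original module): `attribute_map` lets
# generic code read `hidden_size` and `num_attention_heads` even though the
# underlying fields are `d_model` and `encoder_attention_heads`.
def _example_blenderbot_small_config():
    config = BlenderbotSmallConfig(d_model=256, encoder_attention_heads=8)
    assert config.hidden_size == 256
    assert config.num_attention_heads == 8
    return config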
| 41 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
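

# A small standalone sketch of the stdlib machinery exercised above (hedged;
# the one-line doctest here is invented for illustration):
def _example_doctest_runner():
    parser = doctest.DocTestParser()
    test = parser.get_doctest(">>> 1 + 1\n2\n", globs={}, name="sample", filename=None, lineno=0)
    runner = doctest.DocTestRunner()
    runner.run(test)
    return runner.summarize()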
| 147 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of ``vector``."""
    return np.dot(vector, vector)


class SVC:
    """Support vector classifier with a linear or RBF kernel."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
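

# A small usage sketch (hypothetical data; the class name `SVC` is the one
# restored above): two linearly separable points in 2-D.
def _example_svc():
    observations = [np.asarray([0.0, 1.0]), np.asarray([0.0, -1.0])]
    classes = np.asarray([1, -1])
    svc = SVC(kernel="linear", regularization=10)
    svc.fit(observations, classes)
    return svc.predict(np.asarray([0.0, 2.0]))  # expected: 1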
if __name__ == "__main__":
import doctest
doctest.testmod()
| 147 | 1 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDEN_SIZE_MAPPING = {  # name kept as in the upstream conversion script
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
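

# A hypothetical invocation sketch (repo and file names are illustrative; the
# flags match the argparse definition below):
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#       --output_dir ./rwkv-169m-hf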
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 93 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
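

# A hedged usage sketch mirroring the integration tests above (the checkpoint id
# comes from those tests; step counts are illustrative):
def _example_stable_unclip(device="cuda"):
    pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
    pipe = pipe.to(device)
    pipe.enable_attention_slicing()
    return pipe("anime turtle", num_inference_steps=20, prior_num_inference_steps=25).images[0]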
| 93 | 1 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
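

# A hypothetical invocation sketch (paths are illustrative; the flags match the
# argparse definition below):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird/model.ckpt \
#       --big_bird_config_file ./bigbird/config.json \
#       --pytorch_dump_path ./bigbird-pytorch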
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 705 |
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    # encode the input to a bytes-like object, then Ascii85-encode that
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # Ascii85-decode the input into bytes and decode that into a readable string
    return base64.a85decode(a85encoded).decode("utf-8")
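

# A small usage sketch (not in the original file): Ascii85 round-trips arbitrary
# UTF-8 text, and the empty string maps to empty bytes.
assert base85_encode("") == b""
assert base85_decode(base85_encode("Hello World!")) == "Hello World!"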
if __name__ == "__main__":
import doctest
doctest.testmod()
| 482 | 0 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
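

# A small behavioral sketch (hedged): touching the dummy class raises via
# `requires_backends`, telling the user to install the missing `note_seq` backend.
def _example_missing_backend_message():
    try:
        MidiProcessor()
    except Exception as err:
        return str(err)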
| 559 |
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 559 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
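

# A minimal usage sketch (not part of the original module): the ONNX config
# exposes the dynamic-axes mapping and export tolerances declared above.
def _example_yolos_onnx_config():
    config = YolosConfig()
    onnx_config = YolosOnnxConfig(config)
    assert "pixel_values" in onnx_config.inputs
    assert onnx_config.default_onnx_opset == 12
    return onnx_config.atol_for_validation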
| 702 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
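# Note (added): with the _LazyModule above, a statement such as
# `from transformers.models.blenderbot import BlenderbotModel` only imports the
# torch-backed submodule on first attribute access, keeping base imports cheap.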
| 544 | 0 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct_filename, fail_filename=None):
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
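# Input format sketch (added; inferred from the split(";") above, not stated in
# the original file): each line of --correct_filename is expected to look like
# "file;class_name;test_name;correct_line", e.g.
# tests/models/foo/test_modeling_foo.py;FooModelTest;test_logits;expected_slice = torch.tensor([...])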
| 59 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Lists of different sizes make sharding ambiguous, raise an error in this case.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the range of shard indices per job."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
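# Example (added for illustration): distributing 10 shards over at most 3 jobs
# yields contiguous groups of sizes 4, 3 and 3.
# _distribute_shards(num_shards=10, max_num_jobs=3)
# -> [range(0, 4), range(4, 7), range(7, 10)]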
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into `max_num_jobs` gen_kwargs, sharding any list it contains."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
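# Example (added): only lists are sharded, other values are copied as-is.
# _split_gen_kwargs({"files": ["a", "b", "c", "d"], "split": "train"}, max_num_jobs=2)
# -> [{"files": ["a", "b"], "split": "train"}, {"files": ["c", "d"], "split": "train"}]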
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Concatenate the lists from a list of gen_kwargs back into one gen_kwargs."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle the lists in a gen_kwargs, using the same permutation for lists of equal length."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 106 | 0 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
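# Example invocation (added; the script name and paths are hypothetical
# placeholders, not taken from the original file):
# python convert_wav2vec2_seq2seq_checkpoint.py \
#     --checkpoint_path ./checkpoint_best.pt \
#     --dict_path ./dict.ltr.txt \
#     --pytorch_dump_folder_path ./wav2vec2-2-speech2text2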
| 704 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 372 | 0 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d < digit for which 1/d contains the longest recurring
    cycle in its decimal fraction part.

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
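# Example (added): among denominators up to 10, 1/7 has the longest recurring
# decimal cycle (0.142857..., six digits), so solution(1, 10) returns 7.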
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 102 | 0 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`, optionally between `start_prompt` and `end_prompt`."""
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so that leading underscores are ignored when sorting."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a string."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
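# Example (added): constants sort first, then classes, then functions, each
# bucket sorted case-insensitively while ignoring leading underscores.
# sort_objects(["foo", "Bar", "BAZ"]) -> ["BAZ", "Bar", "foo"]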
def sort_objects_in_import(import_statement):
    """Sorts the imports in a single import statement."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Sort the imports defined in the `_import_structure` of every init in the repo."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 701 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" CodeGen tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        truncate_before_pattern=None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
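# Usage sketch (added; `generated_ids` is a placeholder for model output, and
# the truncation patterns are illustrative):
# tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
# text = tokenizer.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])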
| 420 | 0 |
def merge_sort(collection):
    """Pure implementation of the merge sort algorithm in Python."""

    def merge(left, right) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
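# Illustrative check (added; not part of the original file): the result should
# agree with Python's built-in sorted().
assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]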
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 367 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of a user using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
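# Usage note (added): the returned dict is the authenticated user's profile as
# documented by the GitHub REST API, with fields such as "login" and "id".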
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 367 | 1 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)

README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)

README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'

README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'

README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'

EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'

README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'

README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'

EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'

README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'

README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'

EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'

README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'

README_EMPTY = ''

EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'

README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 421 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
| 421 | 1 |
import argparse
from collections import defaultdict
import yaml
a__ = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc):
    """Clean the model documentation table of content: remove duplicate entries and sort alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
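# Usage sketch (added; the script path is an assumption): run the script with no
# flags in CI to detect an unsorted ToC, or pass --fix_and_overwrite to rewrite
# docs/source/en/_toctree.yml in place.
# python utils/check_doc_toc.py --fix_and_overwrite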
| 654 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        '''simple docstring'''
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , """set_timesteps""" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , """set_timesteps""" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps( self ):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop( self ):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0
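# Minimal usage sketch of the scheduler API exercised by this test class
# (`unet` is a hypothetical denoising model, not part of this file):
# scheduler = IPNDMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# for t in scheduler.timesteps:
#     residual = unet(sample, t)
#     sample = scheduler.step(residual, t, sample).prev_sample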
| 654 | 1 |
'''simple docstring'''
def solution( length : int = 50 ) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2, 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
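# The DP counts tilings of a row of `length` unit cells using black unit squares
# plus coloured tiles of lengths 2-4 (this is Project Euler problem 117):
# ways_number[m] accumulates, for every tile length and start offset, the count
# of tilings of the prefix that remains. Small verified values:
# >>> solution(2)
# 2
# >>> solution(5)
# 15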
if __name__ == "__main__":
print(F"""{solution() = }""")
| 703 | '''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return "lower newer", "lower newer"
def __A ( self ) -> Optional[Any]:
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
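    # Illustrative trace of how the merges defined in setUp apply to "lower":
    #   l o w e r</w>  --("l o")-->  lo w e r</w>  --("lo w")-->  low e r</w>
    #   --("e r</w>")-->  low er</w>
    # which is why tokenize("lower") yields ["low", "er</w>"] above.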
    def test_padding( self , max_length=15 ) -> List[str]:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='''max_length''' , )
def __A ( self ) -> List[Any]:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy( OpenAIGPTTokenizationTest ):
'''simple docstring'''
pass
| 174 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_UpperCamelCase : List[str] = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class snake_case__ ( BaseImageProcessor):
a_ = ["pixel_values"]
def __init__( self : Union[str, Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 2_55 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Dict , ) -> None:
super().__init__(**_A )
UpperCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 2_56}
UpperCAmelCase_ : Dict = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
UpperCAmelCase_ : Dict = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase_ : Union[str, Any] = do_resize
UpperCAmelCase_ : Union[str, Any] = size
UpperCAmelCase_ : Optional[Any] = do_center_crop
UpperCAmelCase_ : Dict = crop_size
UpperCAmelCase_ : Any = resample
UpperCAmelCase_ : Optional[int] = do_rescale
UpperCAmelCase_ : Dict = rescale_factor
UpperCAmelCase_ : Tuple = offset
UpperCAmelCase_ : Dict = do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray:
UpperCAmelCase_ : Dict = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
UpperCAmelCase_ : List[Any] = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
UpperCAmelCase_ : str = (size['''height'''], size['''width'''])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def A ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray:
UpperCAmelCase_ : int = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def A ( self : Any , _A : np.ndarray , _A : Union[int, float] , _A : bool = True , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> Tuple:
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
return rescale(_A , scale=_A , data_format=_A , **_A )
def A ( self : Union[str, Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def A ( self : Dict , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[Any] = to_numpy_array(_A )
if do_resize:
UpperCAmelCase_ : Optional[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
UpperCAmelCase_ : int = self.center_crop(_A , size=_A )
if do_rescale:
UpperCAmelCase_ : Any = self.rescale(image=_A , scale=_A , offset=_A )
if do_normalize:
UpperCAmelCase_ : Dict = self.normalize(image=_A , mean=_A , std=_A )
UpperCAmelCase_ : Optional[Any] = to_channel_dimension_format(_A , _A )
return image
def A ( self : str , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : List[Any] , ) -> PIL.Image.Image:
UpperCAmelCase_ : List[Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Union[str, Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : int = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Tuple = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : List[Any] = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Optional[Any] = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase_ : Dict = make_batched(_A )
UpperCAmelCase_ : int = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , offset=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
UpperCAmelCase_ : Optional[int] = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
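# Usage sketch for the video processor class above (named `snake_case__` in this
# file); `video_frames` is a hypothetical list of PIL images, and the shape
# follows from the channels-first output and default 224x224 crop:
# processor = snake_case__(size={"shortest_edge": 256}, crop_size={"height": 224, "width": 224})
# batch = processor([video_frames], return_tensors="np")
# batch["pixel_values"]  # shape (num_videos, num_frames, 3, 224, 224)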
| 541 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_UpperCamelCase : Tuple = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : int = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 541 | 1 |
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term( a_i , k , i , n ):
    # ds_b: digit sum of the high-order digits (indices >= k)
    ds_b = sum(a_i[j] for j in range(k, len(a_i ) ) )
    # c: numeric value of the low-order k digits
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ), k ) ) )
    diff , dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1, -1, -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff , dn , _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i ) ) ):
                    new_c , a_i[j] = divmod(new_c, 10 )
                if new_c > 0:
                    add(a_i, k, new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff , terms_jumped = next_term(a_i, k - 1, i + dn, n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff , terms_jumped = compute(a_i, k, i + dn, n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k) )
    return (diff, dn)
def compute( a_i , k , i , n ):
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b , ds_c , diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend , a_i[j] = divmod(s, 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend )
    return diff, i - start_i
def add( digits , k , addend ):
    for j in range(k, len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient , digits[j] = divmod(s, 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend , digit = divmod(addend, 10 )
        digits.append(digit )
def solution( n : int = 10**15 ):
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff , terms_jumped = next_term(digits, 20, i + dn, n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
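# Context (Project Euler 551): a_1 = 1 and a_n = a_{n-1} + digitsum(a_{n-1}),
# giving 1, 2, 4, 8, 16, 23, 28, 38, 49, ...; solution(n) returns the n-th term,
# jumping ahead via the memoised `next_term` instead of iterating one step at a
# time, e.g. digitsum(16) = 7 so the term after 16 is 23.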
if __name__ == "__main__":
print(f'''{solution() = }''')
| 718 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> Tuple:
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> List[str]:
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 691 | 0 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
    # Solve the impedance triangle Z^2 = R^2 + X^2 for whichever quantity is given as 0.
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
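# Worked examples using the 3-4-5 impedance triangle:
# >>> electrical_impedance(3, 4, 0)
# {'impedance': 5.0}
# >>> electrical_impedance(0, 4, 5)
# {'resistance': 3.0}
# >>> electrical_impedance(3, 0, 5)
# {'reactance': 4.0}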
if __name__ == "__main__":
import doctest
doctest.testmod()
| 443 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config( model_name ):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key( name ):
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'
    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name
    return name
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''
                ] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[
                    F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''
                ] = val[:dim]
                orig_state_dict[
                    F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''
                ] = val[dim : dim * 2]
                orig_state_dict[
                    F'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
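# The "qkv" branch above splits timm's fused attention projection into the three
# separate HF matrices; illustrative shapes for a block of hidden size `dim`:
#   fused weight val.shape == (3 * dim, dim) -> query/key/value each (dim, dim)
#   fused bias   val.shape == (3 * dim,)     -> query/key/value each (dim,)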
def convert_swin_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and image processor for {model_name} to hub''' )
        model.push_to_hub(F'''microsoft/{model_name}''' )
        image_processor.push_to_hub(F'''microsoft/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 622 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase ="▁"
UpperCamelCase ={"vocab_file": "spiece.model"}
UpperCamelCase ={
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
UpperCamelCase ={
"google/pegasus-xsum": 512,
}
UpperCamelCase =logging.get_logger(__name__)
class PegasusTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<pad>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<mask_2>" , __lowerCAmelCase="<mask_1>" , __lowerCAmelCase=None , __lowerCAmelCase=1_03 , __lowerCAmelCase = None , **__lowerCAmelCase , ):
UpperCamelCase_ : Any = offset
if additional_special_tokens is not None:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError(
F"additional_special_tokens should be of type {type(__lowerCAmelCase )}, but is"
F" {type(__lowerCAmelCase )}" )
UpperCamelCase_ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"<unk_{i}>" for i in range(len(__lowerCAmelCase ) , self.offset - 1 )
]
if len(set(__lowerCAmelCase ) ) != len(__lowerCAmelCase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
UpperCamelCase_ : Dict = additional_special_tokens_extended
else:
UpperCamelCase_ : str = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )]
UpperCamelCase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token_sent=__lowerCAmelCase , offset=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
UpperCamelCase_ : Optional[int] = mask_token_sent
UpperCamelCase_ : Any = vocab_file
UpperCamelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
# add special tokens to encoder dict
UpperCamelCase_ : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
UpperCamelCase_ : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
    def vocab_size( self ) -> int:
return len(self.sp_model ) + self.offset
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token( self , index ):
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
            return token
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add( self , pair=False ):
        return 1
    def _special_token_mask( self , seq ):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
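# Illustrative id layout implied by the defaults above (offset=103):
#   0 -> <pad>, 1 -> </s>, 2 -> <mask_1>, 3 -> <mask_2>, 4..104 -> <unk_2>..<unk_102>,
# and a sentencepiece piece with sp_id = p maps to tokenizer id p + 103.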
| 543 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin( TFGenerationMixin ):
    """simple docstring"""
    warnings.warn(
        '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
        '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''',
        FutureWarning,
    )
| 543 | 1 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling( arr , size , stride ):
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("""The input array is not a square matrix""" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling( arr , size , stride ):
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("""The input array is not a square matrix""" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
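# Worked example on a hypothetical 4x4 input with size=2, stride=2:
# >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
# array([[ 6.,  8.],
#        [14., 16.]])
# >>> avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
# array([[ 3.,  5.],
#        [11., 13.]])
# (avgpooling truncates each window average through int(), so 3.5 becomes 3.)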
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 601 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester :
"""simple docstring"""
def __init__( self : Dict , __a : List[str] , __a : List[str]=1_3 , __a : List[str]=7 , __a : Dict=True , __a : str=True , __a : str=9_9 , __a : Dict=3_2 , __a : Optional[int]=5 , __a : List[Any]=4 , __a : Dict=3_7 , __a : List[Any]="gelu" , __a : str=0.1 , __a : Dict=0.1 , __a : Optional[Any]=5_0 , __a : Dict=0.02 , __a : List[Any]=True , __a : str=None , ):
snake_case__ : int = parent
snake_case__ : Any = batch_size
snake_case__ : Any = seq_length
snake_case__ : Dict = is_training
snake_case__ : str = use_input_mask
snake_case__ : Optional[Any] = vocab_size
snake_case__ : List[Any] = hidden_size
snake_case__ : Any = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Any = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : Any = max_position_embeddings
snake_case__ : Tuple = initializer_range
snake_case__ : str = use_labels
snake_case__ : List[str] = scope
def lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Any = None
if self.use_input_mask:
snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase ( self : Optional[Any] ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__a , initializer_range=self.initializer_range , )
def lowercase ( self : List[str] ):
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : Any = self.prepare_config_and_inputs()
snake_case__ : Union[str, Any] = True
snake_case__ : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase ( self : int , __a : List[Any] , __a : Optional[Any] , __a : Tuple , __a : Optional[int] , **__a : Union[str, Any] , ):
snake_case__ : Optional[Any] = BertGenerationEncoder(config=__a )
model.to(__a )
model.eval()
snake_case__ : Optional[int] = model(__a , attention_mask=__a )
snake_case__ : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : str , __a : Union[str, Any] , __a : Any , __a : int , __a : int , __a : List[Any] , __a : List[str] , **__a : List[Any] , ):
snake_case__ : Union[str, Any] = True
snake_case__ : int = BertGenerationEncoder(config=__a )
model.to(__a )
model.eval()
snake_case__ : List[str] = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
snake_case__ : List[Any] = model(
__a , attention_mask=__a , encoder_hidden_states=__a , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : int , __a : Tuple , __a : Union[str, Any] , __a : Optional[Any] , __a : str , __a : Union[str, Any] , __a : str , **__a : List[Any] , ):
snake_case__ : int = True
snake_case__ : Optional[int] = True
snake_case__ : List[str] = BertGenerationDecoder(config=__a ).to(__a ).eval()
# first forward pass
snake_case__ : Union[str, Any] = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , )
snake_case__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case__ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ : Any = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )["""hidden_states"""][0]
snake_case__ : Any = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["""hidden_states"""][0]
# select random slice
snake_case__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def lowercase ( self : Union[str, Any] , __a : List[Any] , __a : int , __a : Union[str, Any] , __a : int , *__a : str , ):
snake_case__ : Union[str, Any] = BertGenerationDecoder(__a )
model.to(__a )
model.eval()
snake_case__ : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Any ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self.prepare_config_and_inputs()
snake_case__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
def lowercase ( self : str ):
snake_case__ : Dict = BertGenerationEncoderTester(self )
snake_case__ : Tuple = ConfigTester(self , config_class=__a , hidden_size=3_7 )
def lowercase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowercase ( self : Optional[int] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowercase ( self : List[Any] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Optional[int] = """bert"""
self.model_tester.create_and_check_model(__a , __a , __a , __a )
def lowercase ( self : str ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a )
def lowercase ( self : Any ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a )
def lowercase ( self : Tuple ):
# This regression test was failing with PyTorch < 1.3
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : int = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case__ : int = None
self.model_tester.create_and_check_model_as_decoder(
__a , __a , __a , __a , __a , __a , )
def lowercase ( self : int ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__a )
@slow
def lowercase ( self : List[str] ):
snake_case__ : int = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(__a )
@require_torch
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Optional[int] ):
snake_case__ : Dict = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
snake_case__ : Optional[Any] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(__a )[0]
snake_case__ : Optional[Any] = torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , __a )
snake_case__ : List[Any] = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@require_torch
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Any ):
snake_case__ : List[str] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
snake_case__ : Tuple = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
snake_case__ : Any = model(__a )[0]
snake_case__ : Optional[Any] = torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , __a )
snake_case__ : int = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
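# Minimal inference sketch for the checkpoint used in the integration tests above
# (input ids and shapes taken from the asserts in this file):
# model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# hidden = model(torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]]))[0]
# hidden.shape  # torch.Size([1, 8, 1024])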
| 648 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A_ ( _lowerCAmelCase : str = "laptop" ):
"""simple docstring"""
_lowerCamelCase : Dict = F'https://www.amazon.in/laptop/s?k={product}'
_lowerCamelCase : Tuple = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCamelCase : int = BeautifulSoup(requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).text )
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCamelCase : Union[str, Any] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCamelCase : Optional[Any] = "Not available"
try:
                product_mrp = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCamelCase : Union[str, Any] = ""
try:
                discount = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCamelCase : List[str] = float("nan" )
except AttributeError:
pass
            data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase : List[str] = " "
_lowerCamelCase : Tuple = " "
data_frame.index += 1
return data_frame
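# Discount sanity check for the formula above (hypothetical prices):
# MRP 1000 and current price 800 -> (1000 - 800) / 1000 * 100 = 20.0 percent.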
if __name__ == "__main__":
    product = 'headphones'
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''') | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer( Trainer ):
    def __init__( self , args=None , **kwargs ):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args , **kwargs )
super().__init__(args=__A,**__A ) | 11 | 0 |